2020-10-13 12:53:23 +02:00
|
|
|
import itertools
|
|
|
|
from collections import defaultdict
|
|
|
|
from dataclasses import dataclass
|
|
|
|
from operator import itemgetter
|
2021-05-11 13:55:49 +02:00
|
|
|
from typing import AbstractSet, Any, Dict, List, Optional, Set
|
2017-10-29 15:52:01 +01:00
|
|
|
|
2021-05-11 13:55:49 +02:00
|
|
|
from django.db.models import Q, QuerySet
|
2020-06-11 00:54:34 +02:00
|
|
|
|
2021-05-11 13:55:49 +02:00
|
|
|
from zerver.models import AlertWord, Realm, Recipient, Stream, Subscription, UserProfile
|
2020-06-11 00:54:34 +02:00
|
|
|
|
2017-10-29 15:40:07 +01:00
|
|
|
|
2020-10-16 14:17:32 +02:00
|
|
|
@dataclass
class SubInfo:
    """Bundles a Subscription row together with its owning user and stream.

    Returned in bulk by get_bulk_stream_subscriber_info so callers get
    the related objects without extra per-row queries.
    """

    # The user who holds the subscription.
    user: UserProfile
    # The Subscription row itself.
    sub: Subscription
    # The stream the subscription points at.
    stream: Stream
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-10-13 12:53:23 +02:00
|
|
|
@dataclass
class SubscriberPeerInfo:
    """Result type of bulk_get_subscriber_peer_info; see that function's
    docstring for the full glossary.
    """

    # Maps stream_id -> ids of users subscribed to that stream.
    subscribed_ids: Dict[int, Set[int]]
    # Maps private stream_id -> ids of users who need to know about
    # subscriber changes (subscribers plus realm admins); computed for
    # private streams only.
    private_peer_dict: Dict[int, Set[int]]
|
2020-10-13 12:53:23 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2021-04-17 13:29:55 +02:00
|
|
|
def get_active_subscriptions_for_stream_id(
    stream_id: int, *, include_deactivated_users: bool
) -> QuerySet:
    """Fetch the active Subscription rows for a single stream."""
    # TODO: Change return type to QuerySet[Subscription]
    subscriptions = Subscription.objects.filter(
        recipient__type=Recipient.STREAM,
        recipient__type_id=stream_id,
        active=True,
    )
    if include_deactivated_users:
        return subscriptions

    # Note that non-active users may still have "active" subscriptions, because we
    # want to be able to easily reactivate them with their old subscriptions. This
    # is why the query here has to look at the is_user_active flag.
    return subscriptions.filter(is_user_active=True)
|
2017-10-29 15:40:07 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-10-15 14:20:50 +02:00
|
|
|
def get_active_subscriptions_for_stream_ids(stream_ids: Set[int]) -> QuerySet:
    """Fetch active subscriptions of non-deactivated users for the given streams."""
    # TODO: Change return type to QuerySet[Subscription]
    return Subscription.objects.filter(
        active=True,
        is_user_active=True,
        recipient__type=Recipient.STREAM,
        recipient__type_id__in=stream_ids,
    )
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
perf: Extract get_subscribed_stream_ids_for_user.
This new method prevents us from getting fat
objects from the database.
Instead, now we just get ids from the database
to build our subqueries.
Note that we could also technically eliminate
the `set(...)` wrappers in this code to have
Django make a subquery and save a round trip.
I am postponing that for another commit (since
it's still somewhat coupled to some other
complexity in `do_get_streams` that I am trying
to cut through, plus it's not the main point
of this commit.)
BEFORE:
# old, still in use for other codepaths
def get_stream_subscriptions_for_user(user_profile: UserProfile) -> QuerySet:
# TODO: Change return type to QuerySet[Subscription]
return Subscription.objects.filter(
user_profile=user_profile,
recipient__type=Recipient.STREAM,
)
user_subs = get_stream_subscriptions_for_user(user_profile).filter(
active=True,
).select_related('recipient')
recipient_check = Q(id__in=[sub.recipient.type_id for sub in user_subs])
AFTER:
# newly added
def get_subscribed_stream_ids_for_user(user_profile: UserProfile) -> QuerySet:
return Subscription.objects.filter(
user_profile_id=user_profile,
recipient__type=Recipient.STREAM,
active=True,
).values_list('recipient__type_id', flat=True)
subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
recipient_check = Q(id__in=set(subscribed_stream_ids))
2020-02-29 18:41:41 +01:00
|
|
|
def get_subscribed_stream_ids_for_user(user_profile: UserProfile) -> QuerySet:
    """Return a flat queryset of the stream ids the user is actively
    subscribed to.

    Fetching bare ids (instead of full Subscription objects) keeps this
    cheap enough to use when building subqueries.
    """
    subs = Subscription.objects.filter(
        active=True,
        recipient__type=Recipient.STREAM,
        user_profile_id=user_profile,
    )
    return subs.values_list("recipient__type_id", flat=True)
|
perf: Extract get_subscribed_stream_ids_for_user.
This new method prevents us from getting fat
objects from the database.
Instead, now we just get ids from the database
to build our subqueries.
Note that we could also technically eliminate
the `set(...)` wrappers in this code to have
Django make a subquery and save a round trip.
I am postponing that for another commit (since
it's still somewhat coupled to some other
complexity in `do_get_streams` that I am trying
to cut through, plus it's not the main point
of this commit.)
BEFORE:
# old, still in use for other codepaths
def get_stream_subscriptions_for_user(user_profile: UserProfile) -> QuerySet:
# TODO: Change return type to QuerySet[Subscription]
return Subscription.objects.filter(
user_profile=user_profile,
recipient__type=Recipient.STREAM,
)
user_subs = get_stream_subscriptions_for_user(user_profile).filter(
active=True,
).select_related('recipient')
recipient_check = Q(id__in=[sub.recipient.type_id for sub in user_subs])
AFTER:
# newly added
def get_subscribed_stream_ids_for_user(user_profile: UserProfile) -> QuerySet:
return Subscription.objects.filter(
user_profile_id=user_profile,
recipient__type=Recipient.STREAM,
active=True,
).values_list('recipient__type_id', flat=True)
subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
recipient_check = Q(id__in=set(subscribed_stream_ids))
2020-02-29 18:41:41 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2021-05-12 23:40:58 +02:00
|
|
|
def get_subscribed_stream_recipient_ids_for_user(user_profile: UserProfile) -> QuerySet:
    """Return a flat queryset of the recipient ids for the streams the
    user is actively subscribed to."""
    subs = Subscription.objects.filter(
        active=True,
        recipient__type=Recipient.STREAM,
        user_profile_id=user_profile,
    )
    return subs.values_list("recipient_id", flat=True)
|
|
|
|
|
|
|
|
|
2018-03-15 00:32:42 +01:00
|
|
|
def get_stream_subscriptions_for_user(user_profile: UserProfile) -> QuerySet:
    """Fetch all of the user's stream subscriptions, active or not."""
    # TODO: Change return type to QuerySet[Subscription]
    return Subscription.objects.filter(
        recipient__type=Recipient.STREAM,
        user_profile=user_profile,
    )
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2018-03-15 00:32:42 +01:00
|
|
|
def get_stream_subscriptions_for_users(user_profiles: List[UserProfile]) -> QuerySet:
    """Fetch all stream subscriptions, active or not, for the given users."""
    # TODO: Change return type to QuerySet[Subscription]
    return Subscription.objects.filter(
        recipient__type=Recipient.STREAM,
        user_profile__in=user_profiles,
    )
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2017-11-05 11:15:10 +01:00
|
|
|
def get_bulk_stream_subscriber_info(
    users: List[UserProfile],
    streams: List[Stream],
) -> Dict[int, List[SubInfo]]:
    """Map each user's id to SubInfo records for their active
    subscriptions among the given streams.

    Every user in `users` gets an entry, possibly an empty list.
    """
    streams_by_recipient_id = {stream.recipient_id: stream for stream in streams}
    users_by_id = {user.id: user for user in users}

    rows = Subscription.objects.filter(
        active=True,
        recipient__type=Recipient.STREAM,
        recipient__type_id__in={stream.id for stream in streams},
        user_profile__in=users,
    ).only("user_profile_id", "recipient_id")

    # Seed with empty lists so callers can index by any requested user.
    result: Dict[int, List[SubInfo]] = {user.id: [] for user in users}

    for row in rows:
        result[row.user_profile_id].append(
            SubInfo(
                user=users_by_id[row.user_profile_id],
                sub=row,
                stream=streams_by_recipient_id[row.recipient_id],
            )
        )

    return result
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2017-11-05 11:15:10 +01:00
|
|
|
def num_subscribers_for_stream_id(stream_id: int) -> int:
    """Count the active subscriptions of non-deactivated users for the stream."""
    subscriptions = get_active_subscriptions_for_stream_id(
        stream_id,
        include_deactivated_users=False,
    )
    return subscriptions.count()
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2019-02-13 10:22:16 +01:00
|
|
|
|
2020-10-13 12:53:23 +02:00
|
|
|
def get_user_ids_for_streams(stream_ids: Set[int]) -> Dict[int, Set[int]]:
    """Map each stream id to the set of ids of its active subscribers
    (non-deactivated users only).

    Streams with no subscribers get no explicit entry, but since the
    result is a defaultdict(set), missing keys read as empty sets.
    """
    all_subs = get_active_subscriptions_for_stream_ids(stream_ids).values(
        "recipient__type_id",
        "user_profile_id",
    )

    # Aggregate directly into per-stream sets.  Unlike the previous
    # itertools.groupby approach, this does not need an ORDER BY on
    # recipient__type_id, sparing the database a sort of the rows.
    result: Dict[int, Set[int]] = defaultdict(set)
    for row in all_subs:
        result[row["recipient__type_id"]].add(row["user_profile_id"])

    return result
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-10-13 12:53:23 +02:00
|
|
|
def bulk_get_subscriber_peer_info(
    realm: Realm,
    streams: List[Stream],
) -> SubscriberPeerInfo:
    """
    Compute, for each stream, who is subscribed and (for private streams
    only) who should hear about subscriber changes.

    Glossary:

        subscribed_ids:
            stream_id -> ids of the users actually subscribed to the
            stream, which we generally send to the person subscribing
            to the stream.

        private_peer_dict:
            stream_id -> ids of the folks that need to know about a new
            subscriber.  It's usually a superset of the subscribers.

            Note that we only compute this for PRIVATE streams.  We
            let other code handle peers for public streams, since the
            peers for all public streams are actually the same group
            of users, and downstream code can use that property of
            public streams to avoid extra work.
    """

    subscribed_ids: Dict[int, Set[int]] = {}
    private_peer_dict: Dict[int, Set[int]] = {}

    private_stream_ids = {stream.id for stream in streams if stream.invite_only}
    public_stream_ids = {stream.id for stream in streams} - private_stream_ids

    stream_user_ids = get_user_ids_for_streams(private_stream_ids | public_stream_ids)

    if private_stream_ids:
        # Same business rule as in bulk_get_private_peers: realm admins
        # can see all private stream subscribers.
        realm_admin_ids = {admin.id for admin in realm.get_admin_users_and_bots()}

        for stream_id in private_stream_ids:
            subscriber_ids = stream_user_ids.get(stream_id, set())
            subscribed_ids[stream_id] = subscriber_ids
            private_peer_dict[stream_id] = subscriber_ids | realm_admin_ids

    for stream_id in public_stream_ids:
        subscribed_ids[stream_id] = stream_user_ids.get(stream_id, set())

    return SubscriberPeerInfo(
        subscribed_ids=subscribed_ids,
        private_peer_dict=private_peer_dict,
    )
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-10-20 17:46:31 +02:00
|
|
|
def bulk_get_private_peers(
    realm: Realm,
    private_streams: List[Stream],
) -> Dict[int, Set[int]]:
    """Map each private stream's id to the ids of the users who need to
    know about its subscriber changes (subscribers plus realm admins)."""
    if not private_streams:
        return {}

    # Our caller should only pass us private streams.
    for stream in private_streams:
        assert stream.invite_only

    realm_admin_ids = {admin.id for admin in realm.get_admin_users_and_bots()}

    stream_user_ids = get_user_ids_for_streams(
        {stream.id for stream in private_streams}
    )

    # This is the same business rule as we use in
    # bulk_get_subscriber_peer_info: realm admins can see all private
    # stream subscribers.
    return {
        stream.id: stream_user_ids.get(stream.id, set()) | realm_admin_ids
        for stream in private_streams
    }
|
2019-02-13 10:22:16 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
|
|
|
def handle_stream_notifications_compatibility(
    user_profile: Optional[UserProfile],
    stream_dict: Dict[str, Any],
    notification_settings_null: bool,
) -> None:
    """Backfill `None` stream-notification values for older clients.

    Old versions of the mobile apps don't support `None` as a value for
    the stream-level notifications properties, so for those clients we
    substitute what the frontend would otherwise compute: the user-level
    default, or False when there is no user.

    Note that this situation results in these older mobile apps having
    a subtle bug where changes to the user-level stream notification
    defaults will not properly propagate to the mobile app "stream
    notification settings" UI until the app re-registers.  This is an
    acceptable level of backwards-compatibility problem in our view.
    """
    assert not notification_settings_null

    notification_types = (
        "desktop_notifications",
        "audible_notifications",
        "push_notifications",
        "email_notifications",
    )
    for setting_name in notification_types:
        # Explicit True/False values are supported by older clients;
        # only None needs to be replaced.
        if stream_dict[setting_name] is None:
            if user_profile is None:
                stream_dict[setting_name] = False
            else:
                stream_dict[setting_name] = getattr(
                    user_profile, "enable_stream_" + setting_name
                )
|
2021-03-27 03:01:37 +01:00
|
|
|
|
|
|
|
|
|
|
|
def subscriber_ids_with_stream_history_access(stream: Stream) -> Set[int]:
    """Return the ids of active users who can access any message history
    on this stream (regardless of whether they have a UserMessage),
    based on the stream's configuration.

    1. if !history_public_to_subscribers:
          History is not available to anyone
    2. if history_public_to_subscribers:
          All subscribers can access the history including guests

    The results of this function need to be kept consistent with
    what can_access_stream_history would dictate.
    """
    if not stream.is_history_public_to_subscribers():
        # Nobody can read history on such a stream.
        return set()

    subscriber_ids = get_active_subscriptions_for_stream_id(
        stream.id,
        include_deactivated_users=False,
    ).values_list("user_profile_id", flat=True)
    return set(subscriber_ids)
|
2021-05-11 13:55:49 +02:00
|
|
|
|
|
|
|
|
|
|
|
def get_subscriptions_for_send_message(
    *,
    realm_id: int,
    stream_id: int,
    possible_wildcard_mention: bool,
    possibly_mentioned_user_ids: AbstractSet[int],
) -> QuerySet:
    """This function optimizes an important use case for large
    streams. Open realms often have many long_term_idle users, which
    can result in 10,000s of long_term_idle recipients in default
    streams. do_send_messages has an optimization to avoid doing work
    for long_term_idle unless message flags or notifications should be
    generated.

    However, it's expensive even to fetch and process them all in
    Python at all. This function returns all recipients of a stream
    message that could possibly require action in the send-message
    codepath.

    Basically, it returns all subscribers, excluding all long-term
    idle users who it can prove will not receive a UserMessage row or
    notification for the message (i.e. no alert words, mentions, or
    email/push notifications are configured) and thus are not needed
    for processing the message send.

    Critically, this function is called before the Markdown
    processor. As a result, it returns all subscribers who have ANY
    configured alert words, even if their alert words aren't present
    in the message. Similarly, it returns all subscribers who match
    the "possible mention" parameters.

    Downstream logic, which runs after the Markdown processor has
    parsed the message, will do the precise determination.
    """
    query = get_active_subscriptions_for_stream_id(
        stream_id,
        include_deactivated_users=False,
    )

    if possible_wildcard_mention:
        # A wildcard mention can require action for every subscriber,
        # so there is nothing to narrow down.
        return query

    # Build up the subscribers who could possibly need action: non-idle
    # users; users with push/email notifications enabled, either on the
    # subscription itself or via their stream-level defaults when the
    # subscription setting is None; possibly-mentioned users; and anyone
    # with alert words configured in this realm.
    needs_action = Q(user_profile__long_term_idle=False)
    needs_action |= Q(push_notifications=True)
    needs_action |= Q(push_notifications=None) & Q(
        user_profile__enable_stream_push_notifications=True
    )
    needs_action |= Q(email_notifications=True)
    needs_action |= Q(email_notifications=None) & Q(
        user_profile__enable_stream_email_notifications=True
    )
    needs_action |= Q(user_profile_id__in=possibly_mentioned_user_ids)
    needs_action |= Q(
        user_profile_id__in=AlertWord.objects.filter(realm_id=realm_id).values_list(
            "user_profile_id"
        )
    )

    return query.filter(needs_action)
|