2020-06-11 00:54:34 +02:00
|
|
|
import copy
|
2016-10-04 15:52:26 +02:00
|
|
|
import datetime
|
|
|
|
import zlib
|
2020-11-24 12:31:28 +01:00
|
|
|
from dataclasses import dataclass, field
|
2022-06-23 20:07:19 +02:00
|
|
|
from typing import (
|
2022-06-23 19:58:17 +02:00
|
|
|
TYPE_CHECKING,
|
2022-06-23 20:07:19 +02:00
|
|
|
Any,
|
|
|
|
Collection,
|
|
|
|
Dict,
|
|
|
|
List,
|
|
|
|
Mapping,
|
|
|
|
Optional,
|
|
|
|
Sequence,
|
|
|
|
Set,
|
|
|
|
Tuple,
|
|
|
|
TypedDict,
|
|
|
|
Union,
|
|
|
|
)
|
2016-10-04 15:52:26 +02:00
|
|
|
|
2020-06-11 00:54:34 +02:00
|
|
|
import ahocorasick
|
2020-08-07 01:09:47 +02:00
|
|
|
import orjson
|
2021-06-03 15:04:22 +02:00
|
|
|
from django.conf import settings
|
2019-03-20 04:15:58 +01:00
|
|
|
from django.db import connection
|
2020-10-13 15:49:40 +02:00
|
|
|
from django.db.models import Max, Sum
|
2020-06-11 00:54:34 +02:00
|
|
|
from django.utils.timezone import now as timezone_now
|
2021-04-16 00:57:30 +02:00
|
|
|
from django.utils.translation import gettext as _
|
2020-06-09 11:57:28 +02:00
|
|
|
from psycopg2.sql import SQL
|
2018-01-22 21:50:22 +01:00
|
|
|
|
2021-07-16 22:11:10 +02:00
|
|
|
from analytics.lib.counts import COUNT_STATS
|
|
|
|
from analytics.models import RealmCount
|
2017-10-10 05:23:53 +02:00
|
|
|
from zerver.lib.avatar import get_avatar_field
|
2017-11-07 17:36:29 +01:00
|
|
|
from zerver.lib.cache import (
|
2022-04-14 23:45:46 +02:00
|
|
|
cache_set_many,
|
2017-11-07 17:36:29 +01:00
|
|
|
cache_with_key,
|
|
|
|
generic_bulk_cached_fetch,
|
|
|
|
to_dict_cache_key,
|
|
|
|
to_dict_cache_key_id,
|
|
|
|
)
|
2021-07-16 22:11:10 +02:00
|
|
|
from zerver.lib.display_recipient import bulk_fetch_display_recipients
|
2021-09-20 10:34:10 +02:00
|
|
|
from zerver.lib.exceptions import JsonableError, MissingAuthenticationError
|
2021-06-17 12:20:40 +02:00
|
|
|
from zerver.lib.markdown import MessageRenderingResult, markdown_convert, topic_links
|
2020-06-28 02:42:57 +02:00
|
|
|
from zerver.lib.markdown import version as markdown_version
|
2021-06-13 00:51:30 +02:00
|
|
|
from zerver.lib.mention import MentionData
|
2021-06-14 18:49:28 +02:00
|
|
|
from zerver.lib.request import RequestVariableConversionError
|
2020-09-11 16:11:06 +02:00
|
|
|
from zerver.lib.stream_subscription import (
|
|
|
|
get_stream_subscriptions_for_user,
|
2021-05-12 23:40:58 +02:00
|
|
|
get_subscribed_stream_recipient_ids_for_user,
|
2020-09-11 16:11:06 +02:00
|
|
|
num_subscribers_for_stream_id,
|
|
|
|
)
|
2021-09-20 10:34:10 +02:00
|
|
|
from zerver.lib.streams import get_web_public_streams_queryset
|
2016-10-04 15:52:26 +02:00
|
|
|
from zerver.lib.timestamp import datetime_to_timestamp
|
2020-06-11 00:54:34 +02:00
|
|
|
from zerver.lib.topic import DB_TOPIC_NAME, MESSAGE__TOPIC, TOPIC_LINKS, TOPIC_NAME
|
2022-02-14 17:04:39 +01:00
|
|
|
from zerver.lib.types import DisplayRecipientT, EditHistoryEvent, UserDisplayRecipient
|
2022-04-14 21:57:20 +02:00
|
|
|
from zerver.lib.url_preview.types import UrlEmbedData
|
2022-02-22 21:07:07 +01:00
|
|
|
from zerver.lib.user_topics import build_topic_mute_checker, topic_is_muted
|
2016-10-04 15:52:26 +02:00
|
|
|
from zerver.models import (
|
2020-06-11 00:54:34 +02:00
|
|
|
MAX_TOPIC_NAME_LENGTH,
|
2016-10-04 15:52:26 +02:00
|
|
|
Message,
|
2020-06-11 00:54:34 +02:00
|
|
|
Reaction,
|
2017-01-18 23:19:18 +01:00
|
|
|
Realm,
|
2016-10-04 15:52:26 +02:00
|
|
|
Recipient,
|
2016-10-12 02:14:08 +02:00
|
|
|
Stream,
|
2018-02-11 14:09:17 +01:00
|
|
|
SubMessage,
|
2017-08-09 02:22:00 +02:00
|
|
|
Subscription,
|
2016-10-12 02:14:08 +02:00
|
|
|
UserMessage,
|
2020-06-11 00:54:34 +02:00
|
|
|
UserProfile,
|
|
|
|
get_display_recipient_by_id,
|
2018-07-27 11:47:07 +02:00
|
|
|
get_usermessage_by_message_id,
|
2020-06-11 00:54:34 +02:00
|
|
|
query_for_ids,
|
2016-10-04 15:52:26 +02:00
|
|
|
)
|
|
|
|
|
2022-06-23 19:58:17 +02:00
|
|
|
if TYPE_CHECKING:
|
|
|
|
from django.db.models.query import _QuerySet as ValuesQuerySet
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2021-06-09 13:31:39 +02:00
|
|
|
class MessageDetailsDict(TypedDict, total=False):
    """Compact per-message metadata; total=False because only the fields
    relevant to the message's recipient type are populated.

    NOTE(review): presumably `type` distinguishes stream vs. private
    messages, with `stream_id`/`topic` set for stream messages and
    `user_ids` for private ones — confirm against callers.
    """

    type: str
    mentioned: bool
    user_ids: List[int]
    stream_id: int
    topic: str
    unmuted_stream_msg: bool
|
|
|
|
|
|
|
|
|
2020-09-29 13:05:31 +02:00
|
|
|
class RawReactionRow(TypedDict):
    """One reaction row as fetched from the database.

    The double-underscore key names follow Django's `.values()` related-field
    traversal syntax (e.g. `user_profile__email`).
    """

    emoji_code: str
    emoji_name: str
    message_id: int
    reaction_type: str
    user_profile__email: str
    user_profile__full_name: str
    user_profile_id: int
|
2020-09-29 13:05:31 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2021-07-09 20:34:02 +02:00
|
|
|
class RawUnreadStreamDict(TypedDict):
    """Location (stream + topic) of a single unread stream message."""

    stream_id: int
    topic: str
|
|
|
|
|
|
|
|
|
|
|
|
class RawUnreadPrivateMessageDict(TypedDict):
    """The conversation partner for a single unread private message."""

    other_user_id: int
|
2021-07-09 20:34:02 +02:00
|
|
|
|
|
|
|
|
|
|
|
class RawUnreadHuddleDict(TypedDict):
    """The participant set for a single unread group private message.

    NOTE(review): presumably a comma-joined string of user IDs — confirm
    against the code that builds it.
    """

    user_ids_string: str
|
|
|
|
|
|
|
|
|
2020-05-02 06:24:43 +02:00
|
|
|
class RawUnreadMessagesResult(TypedDict):
    """Internal (server-side) aggregation of a user's unread messages.

    The per-kind dicts are keyed by message ID.  This raw form is later
    converted into the client-facing UnreadMessagesResult.
    """

    pm_dict: Dict[int, RawUnreadPrivateMessageDict]
    stream_dict: Dict[int, RawUnreadStreamDict]
    huddle_dict: Dict[int, RawUnreadHuddleDict]
    # Message IDs where the user was mentioned.
    mentions: Set[int]
    muted_stream_ids: List[int]
    # Unread stream message IDs that are not muted away.
    unmuted_stream_msgs: Set[int]
    # True when the user has more unreads than we were willing to fetch
    # (see MAX_UNREAD_MESSAGES).
    old_unreads_missing: bool
|
2017-11-10 15:57:43 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2022-03-07 15:12:24 +01:00
|
|
|
class UnreadStreamInfo(TypedDict):
    """Client-facing summary of unreads within one stream/topic pair."""

    stream_id: int
    topic: str
    unread_message_ids: List[int]
|
|
|
|
|
|
|
|
|
|
|
|
class UnreadPrivateMessageInfo(TypedDict):
    """Client-facing summary of unreads within one private conversation."""

    other_user_id: int
    # Deprecated and misleading synonym for other_user_id
    sender_id: int
    unread_message_ids: List[int]
|
|
|
|
|
|
|
|
|
|
|
|
class UnreadHuddleInfo(TypedDict):
    """Client-facing summary of unreads within one group private conversation."""

    user_ids_string: str
    unread_message_ids: List[int]
|
|
|
|
|
|
|
|
|
2020-05-02 06:24:43 +02:00
|
|
|
class UnreadMessagesResult(TypedDict):
    """Client-facing (API-format) aggregation of a user's unread messages."""

    pms: List[UnreadPrivateMessageInfo]
    streams: List[UnreadStreamInfo]
    huddles: List[UnreadHuddleInfo]
    # Message IDs where the user was mentioned.
    mentions: List[int]
    # Total number of unread messages.
    count: int
    # True when older unreads were cut off by MAX_UNREAD_MESSAGES.
    old_unreads_missing: bool
|
2017-08-09 04:01:00 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-11-24 12:31:28 +01:00
|
|
|
@dataclass
class SendMessageRequest:
    """Bundle of precomputed state needed to deliver a single message.

    Built once per message send and then threaded through the delivery
    pipeline (saving, notifications, queue events), so that the various
    consumers don't each have to re-query the database.

    NOTE(review): the exact semantics of the various *_user_ids sets are
    defined by the code that constructs this object (not visible here);
    the comments below are best-effort and should be confirmed there.
    """

    message: Message
    rendering_result: MessageRenderingResult
    # The destination stream, or None (presumably for private messages).
    stream: Optional[Stream]
    local_id: Optional[str]
    sender_queue_id: Optional[str]
    realm: Realm
    mention_data: MentionData
    mentioned_user_groups_map: Dict[int, int]
    # Users who should receive the message event.
    active_user_ids: Set[int]
    online_push_user_ids: Set[int]
    # Users whose PM/mention push or email notifications are disabled.
    pm_mention_push_disabled_user_ids: Set[int]
    pm_mention_email_disabled_user_ids: Set[int]
    stream_push_user_ids: Set[int]
    stream_email_user_ids: Set[int]
    muted_sender_user_ids: Set[int]
    # Users for whom a UserMessage row should be created.
    um_eligible_user_ids: Set[int]
    long_term_idle_user_ids: Set[int]
    default_bot_user_ids: Set[int]
    # (user_profile_id, bot_type) pairs for service bots.
    service_bot_tuples: List[Tuple[int, int]]
    all_bot_user_ids: Set[int]
    wildcard_mention_user_ids: Set[int]
    # URLs in the message to fetch previews/embeds for.
    links_for_embed: Set[str]
    widget_content: Optional[Dict[str, Any]]
    submessages: List[Dict[str, Any]] = field(default_factory=list)
    # Scheduled-message delivery details, when applicable.
    deliver_at: Optional[datetime.datetime] = None
    delivery_type: Optional[str] = None
    limit_unread_user_ids: Optional[Set[int]] = None
    service_queue_events: Optional[Dict[str, List[Dict[str, Any]]]] = None
|
2020-11-24 12:31:28 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2018-08-01 20:56:40 +02:00
|
|
|
# We won't try to fetch more unread message IDs from the database than
# this limit. The limit is super high, in large part because it means
# client-side code mostly doesn't need to think about the case that a
# user has more older unread messages that were cut off.
# See RawUnreadMessagesResult.old_unreads_missing for how exceeding this
# limit is signaled to consumers.
MAX_UNREAD_MESSAGES = 50000
|
2017-08-01 18:28:56 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-03-28 07:46:04 +01:00
|
|
|
def truncate_content(content: str, max_length: int, truncation_message: str) -> str:
    """Return `content` truncated to at most `max_length` characters.

    If truncation is needed, the result ends with `truncation_message`
    and is exactly `max_length` characters long.

    Raises ValueError if `truncation_message` itself is longer than
    `max_length`; previously that case silently computed a negative slice
    bound and returned a string LONGER than `max_length`.
    """
    if len(content) > max_length:
        if len(truncation_message) > max_length:
            raise ValueError("truncation_message is longer than max_length")
        content = content[: max_length - len(truncation_message)] + truncation_message
    return content
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-12-19 02:36:50 +01:00
|
|
|
def normalize_body(body: str) -> str:
    """Canonicalize and validate a raw message body.

    Strips trailing whitespace and leading newlines, rejects empty
    bodies and null bytes, and truncates overly long messages.
    """
    stripped = body.rstrip().lstrip("\n")
    if not stripped:
        raise JsonableError(_("Message must not be empty"))
    if "\x00" in stripped:
        raise JsonableError(_("Message must not contain null bytes"))
    return truncate_content(stripped, settings.MAX_MESSAGE_LENGTH, "\n[message truncated]")
|
2020-03-28 07:46:04 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-03-28 07:46:04 +01:00
|
|
|
def truncate_topic(topic: str) -> str:
    """Truncate a topic name to MAX_TOPIC_NAME_LENGTH, appending "..." if shortened."""
    return truncate_content(topic, MAX_TOPIC_NAME_LENGTH, "...")
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
|
|
|
def messages_for_ids(
    message_ids: List[int],
    user_message_flags: Dict[int, List[str]],
    search_fields: Dict[int, Dict[str, str]],
    apply_markdown: bool,
    client_gravatar: bool,
    allow_edit_history: bool,
) -> List[Dict[str, Any]]:
    """Bulk-fetch the given messages and finalize them into API-format dicts.

    Uses the message cache (with DB fallback via MessageDict.get_raw_db_rows),
    attaches per-user flags and any search highlight fields, and strips edit
    history when the realm disallows exposing it.
    """

    def id_fetcher(row: Dict[str, Any]) -> int:
        return row["id"]

    message_dicts = generic_bulk_cached_fetch(
        to_dict_cache_key_id,
        MessageDict.get_raw_db_rows,
        message_ids,
        id_fetcher=id_fetcher,
        cache_transformer=MessageDict.build_dict_from_raw_db_row,
        extractor=extract_message_dict,
        setter=stringify_message_dict,
    )

    message_list: List[Dict[str, Any]] = []
    for message_id in message_ids:
        msg = message_dicts[message_id]
        msg["flags"] = user_message_flags[message_id]
        if message_id in search_fields:
            msg.update(search_fields[message_id])
        # Make sure that we never send message edit history to clients
        # in realms with allow_edit_history disabled.
        if not allow_edit_history and "edit_history" in msg:
            del msg["edit_history"]
        message_list.append(msg)

    MessageDict.post_process_dicts(message_list, apply_markdown, client_gravatar)
    return message_list
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
|
|
|
def sew_messages_and_reactions(
    messages: List[Dict[str, Any]], reactions: List[Dict[str, Any]]
) -> List[Dict[str, Any]]:
    """Attach each reaction row to its parent message's "reactions" list.

    Mutates the message dicts in place and returns them as a list.
    """
    by_id: Dict[int, Dict[str, Any]] = {}
    for message in messages:
        # Every message gets a reactions list, even if it stays empty.
        message["reactions"] = []
        by_id[message["id"]] = message

    for reaction in reactions:
        by_id[reaction["message_id"]]["reactions"].append(reaction)

    return list(by_id.values())
|
|
|
|
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
def sew_messages_and_submessages(
    messages: List[Dict[str, Any]], submessages: List[Dict[str, Any]]
) -> None:
    """Attach each submessage row to its parent message's "submessages" list.

    Mutates the message dicts in place; submessages whose message_id does
    not appear in `messages` are ignored.  Mirrors sew_messages_and_reactions.
    """
    lookup: Dict[int, Dict[str, Any]] = {}
    for message in messages:
        message["submessages"] = []
        lookup[message["id"]] = message

    for submessage in submessages:
        target = lookup.get(submessage["message_id"])
        if target is not None:
            target["submessages"].append(submessage)
|
2018-02-11 14:09:17 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2017-11-05 11:15:10 +01:00
|
|
|
def extract_message_dict(message_bytes: bytes) -> Dict[str, Any]:
    """Decompress and deserialize a cached message dict.

    Inverse of stringify_message_dict.
    """
    return orjson.loads(zlib.decompress(message_bytes))
|
2016-10-04 15:52:26 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2017-11-05 11:15:10 +01:00
|
|
|
def stringify_message_dict(message_dict: Dict[str, Any]) -> bytes:
    """Serialize and compress a message dict for the cache.

    Inverse of extract_message_dict.
    """
    return zlib.compress(orjson.dumps(message_dict))
|
2016-10-04 15:52:26 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
|
|
|
@cache_with_key(to_dict_cache_key, timeout=3600 * 24)
def message_to_dict_json(message: Message, realm_id: Optional[int] = None) -> bytes:
    """Return the compressed, serialized dict for one message, caching it for 24h."""
    return MessageDict.to_dict_uncached([message], realm_id)[message.id]
|
2016-10-04 15:52:26 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2018-08-09 17:50:43 +02:00
|
|
|
def save_message_rendered_content(message: Message, content: str) -> str:
    """Render `content` as Markdown and persist it on `message`.

    Also records the Markdown renderer version so stale renderings can be
    detected later.

    NOTE(review): declared to return `str`, but returns None when
    render_markdown yields None — confirm render_markdown's contract
    (the None check may be vestigial).
    """
    rendering_result = render_markdown(message, content, realm=message.get_realm())
    rendered_content = None
    if rendering_result is not None:
        rendered_content = rendering_result.rendered_content
    message.rendered_content = rendered_content
    message.rendered_content_version = markdown_version
    message.save_rendered_content()
    return rendered_content
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2017-11-05 11:37:41 +01:00
|
|
|
class MessageDict:
|
2020-09-28 20:50:02 +02:00
|
|
|
"""MessageDict is the core class responsible for marshalling Message
|
|
|
|
objects obtained from the database into a format that can be sent
|
|
|
|
to clients via the Zulip API, whether via `GET /messages`,
|
|
|
|
outgoing webhooks, or other code paths. There are two core flows through
|
|
|
|
which this class is used:
|
|
|
|
|
|
|
|
* For just-sent messages, we construct a single `wide_dict` object
|
|
|
|
containing all the data for the message and the related
|
|
|
|
UserProfile models (sender_info and recipient_info); this object
|
|
|
|
can be stored in queues, caches, etc., and then later turned
|
|
|
|
into an API-format JSONable dictionary via finalize_payload.
|
|
|
|
|
|
|
|
* When fetching messages from the database, we fetch their data in
|
|
|
|
bulk using messages_for_ids, which makes use of caching, bulk
|
|
|
|
fetches that skip the Django ORM, etc., to provide an optimized
|
|
|
|
interface for fetching hundreds of thousands of messages from
|
|
|
|
the database and then turning them into API-format JSON
|
|
|
|
dictionaries.
|
|
|
|
|
|
|
|
"""
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2017-10-20 21:34:05 +02:00
|
|
|
@staticmethod
|
2021-02-12 08:19:30 +01:00
|
|
|
def wide_dict(message: Message, realm_id: Optional[int] = None) -> Dict[str, Any]:
|
|
|
|
"""
|
2020-03-28 01:25:56 +01:00
|
|
|
The next two lines get the cacheable field related
|
2017-10-20 21:34:05 +02:00
|
|
|
to our message object, with the side effect of
|
|
|
|
populating the cache.
|
2021-02-12 08:19:30 +01:00
|
|
|
"""
|
2020-06-02 10:03:20 +02:00
|
|
|
json = message_to_dict_json(message, realm_id)
|
2017-10-20 21:34:05 +02:00
|
|
|
obj = extract_message_dict(json)
|
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
"""
|
2017-10-20 21:34:05 +02:00
|
|
|
The steps below are similar to what we do in
|
|
|
|
post_process_dicts(), except we don't call finalize_payload(),
|
|
|
|
since that step happens later in the queue
|
|
|
|
processor.
|
2021-02-12 08:20:45 +01:00
|
|
|
"""
|
2017-10-20 21:34:05 +02:00
|
|
|
MessageDict.bulk_hydrate_sender_info([obj])
|
2019-08-07 00:18:13 +02:00
|
|
|
MessageDict.bulk_hydrate_recipient_info([obj])
|
2017-10-20 21:34:05 +02:00
|
|
|
|
|
|
|
return obj
|
|
|
|
|
2017-10-10 09:22:21 +02:00
|
|
|
@staticmethod
|
2021-02-12 08:19:30 +01:00
|
|
|
def post_process_dicts(
|
|
|
|
objs: List[Dict[str, Any]], apply_markdown: bool, client_gravatar: bool
|
|
|
|
) -> None:
|
|
|
|
"""
|
2020-03-26 23:16:23 +01:00
|
|
|
NOTE: This function mutates the objects in
|
|
|
|
the `objs` list, rather than making
|
|
|
|
shallow copies. It might be safer to
|
|
|
|
make shallow copies here, but performance
|
|
|
|
is somewhat important here, as we are
|
2020-09-28 20:59:26 +02:00
|
|
|
often fetching hundreds of messages.
|
2021-02-12 08:19:30 +01:00
|
|
|
"""
|
2017-10-10 20:10:57 +02:00
|
|
|
MessageDict.bulk_hydrate_sender_info(objs)
|
2019-08-07 00:18:13 +02:00
|
|
|
MessageDict.bulk_hydrate_recipient_info(objs)
|
2017-10-10 20:10:57 +02:00
|
|
|
|
2017-10-10 09:22:21 +02:00
|
|
|
for obj in objs:
|
2020-09-28 20:59:26 +02:00
|
|
|
MessageDict.finalize_payload(obj, apply_markdown, client_gravatar, skip_copy=True)
|
2017-10-14 02:01:20 +02:00
|
|
|
|
2017-10-20 21:27:26 +02:00
|
|
|
@staticmethod
|
2021-02-12 08:19:30 +01:00
|
|
|
def finalize_payload(
|
|
|
|
obj: Dict[str, Any],
|
|
|
|
apply_markdown: bool,
|
|
|
|
client_gravatar: bool,
|
|
|
|
keep_rendered_content: bool = False,
|
|
|
|
skip_copy: bool = False,
|
|
|
|
) -> Dict[str, Any]:
|
|
|
|
"""
|
2020-09-28 20:59:26 +02:00
|
|
|
By default, we make a shallow copy of the incoming dict to avoid
|
|
|
|
mutation-related bugs. Code paths that are passing a unique object
|
|
|
|
can pass skip_copy=True to avoid this extra work.
|
2021-02-12 08:19:30 +01:00
|
|
|
"""
|
2020-09-28 20:59:26 +02:00
|
|
|
if not skip_copy:
|
|
|
|
obj = copy.copy(obj)
|
2020-03-26 23:16:23 +01:00
|
|
|
|
2017-10-31 03:02:23 +01:00
|
|
|
MessageDict.set_sender_avatar(obj, client_gravatar)
|
2017-10-20 21:27:26 +02:00
|
|
|
if apply_markdown:
|
2021-02-12 08:20:45 +01:00
|
|
|
obj["content_type"] = "text/html"
|
|
|
|
obj["content"] = obj["rendered_content"]
|
2017-10-20 21:27:26 +02:00
|
|
|
else:
|
2021-02-12 08:20:45 +01:00
|
|
|
obj["content_type"] = "text/x-markdown"
|
2017-10-20 21:27:26 +02:00
|
|
|
|
2019-11-05 20:35:47 +01:00
|
|
|
if not keep_rendered_content:
|
2021-02-12 08:20:45 +01:00
|
|
|
del obj["rendered_content"]
|
|
|
|
del obj["sender_realm_id"]
|
|
|
|
del obj["sender_avatar_source"]
|
|
|
|
del obj["sender_delivery_email"]
|
|
|
|
del obj["sender_avatar_version"]
|
|
|
|
|
|
|
|
del obj["recipient_type"]
|
|
|
|
del obj["recipient_type_id"]
|
|
|
|
del obj["sender_is_mirror_dummy"]
|
2020-09-28 20:59:26 +02:00
|
|
|
return obj
|
2017-10-14 15:44:59 +02:00
|
|
|
|
2016-10-04 15:52:26 +02:00
|
|
|
@staticmethod
|
2021-02-12 08:19:30 +01:00
|
|
|
def sew_submessages_and_reactions_to_msgs(
|
|
|
|
messages: List[Dict[str, Any]]
|
|
|
|
) -> List[Dict[str, Any]]:
|
2021-02-12 08:20:45 +01:00
|
|
|
msg_ids = [msg["id"] for msg in messages]
|
2020-05-19 10:20:08 +02:00
|
|
|
submessages = SubMessage.get_raw_db_rows(msg_ids)
|
|
|
|
sew_messages_and_submessages(messages, submessages)
|
|
|
|
|
|
|
|
reactions = Reaction.get_raw_db_rows(msg_ids)
|
|
|
|
return sew_messages_and_reactions(messages, reactions)
|
|
|
|
|
2016-10-04 15:52:26 +02:00
|
|
|
@staticmethod
|
2021-02-12 08:19:30 +01:00
|
|
|
def to_dict_uncached(
|
|
|
|
messages: List[Message], realm_id: Optional[int] = None
|
|
|
|
) -> Dict[int, bytes]:
|
2020-06-02 10:03:20 +02:00
|
|
|
messages_dict = MessageDict.to_dict_uncached_helper(messages, realm_id)
|
2021-02-12 08:20:45 +01:00
|
|
|
encoded_messages = {msg["id"]: stringify_message_dict(msg) for msg in messages_dict}
|
2020-05-19 10:22:49 +02:00
|
|
|
return encoded_messages
|
|
|
|
|
|
|
|
@staticmethod
|
2021-02-12 08:19:30 +01:00
|
|
|
def to_dict_uncached_helper(
|
|
|
|
messages: List[Message], realm_id: Optional[int] = None
|
|
|
|
) -> List[Dict[str, Any]]:
|
2020-05-21 22:42:29 +02:00
|
|
|
# Near duplicate of the build_message_dict + get_raw_db_rows
|
|
|
|
# code path that accepts already fetched Message objects
|
|
|
|
# rather than message IDs.
|
2020-05-19 10:22:49 +02:00
|
|
|
|
2020-05-21 22:42:29 +02:00
|
|
|
def get_rendering_realm_id(message: Message) -> int:
|
2020-06-02 10:03:20 +02:00
|
|
|
# realm_id can differ among users, currently only possible
|
|
|
|
# with cross realm bots.
|
|
|
|
if realm_id is not None:
|
|
|
|
return realm_id
|
2020-05-21 22:42:29 +02:00
|
|
|
if message.recipient.type == Recipient.STREAM:
|
|
|
|
return Stream.objects.get(id=message.recipient.type_id).realm_id
|
|
|
|
return message.sender.realm_id
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
message_rows = [
|
|
|
|
{
|
2021-02-12 08:20:45 +01:00
|
|
|
"id": message.id,
|
2021-02-12 08:19:30 +01:00
|
|
|
DB_TOPIC_NAME: message.topic_name(),
|
|
|
|
"date_sent": message.date_sent,
|
|
|
|
"last_edit_time": message.last_edit_time,
|
|
|
|
"edit_history": message.edit_history,
|
|
|
|
"content": message.content,
|
|
|
|
"rendered_content": message.rendered_content,
|
|
|
|
"rendered_content_version": message.rendered_content_version,
|
|
|
|
"recipient_id": message.recipient.id,
|
|
|
|
"recipient__type": message.recipient.type,
|
|
|
|
"recipient__type_id": message.recipient.type_id,
|
|
|
|
"rendering_realm_id": get_rendering_realm_id(message),
|
|
|
|
"sender_id": message.sender.id,
|
|
|
|
"sending_client__name": message.sending_client.name,
|
|
|
|
"sender__realm_id": message.sender.realm_id,
|
|
|
|
}
|
|
|
|
for message in messages
|
|
|
|
]
|
2020-05-21 22:42:29 +02:00
|
|
|
|
|
|
|
MessageDict.sew_submessages_and_reactions_to_msgs(message_rows)
|
|
|
|
return [MessageDict.build_dict_from_raw_db_row(row) for row in message_rows]
|
2016-10-04 15:52:26 +02:00
|
|
|
|
2017-10-14 16:14:54 +02:00
|
|
|
@staticmethod
|
2017-11-05 11:15:10 +01:00
|
|
|
def get_raw_db_rows(needed_ids: List[int]) -> List[Dict[str, Any]]:
|
2017-10-14 16:14:54 +02:00
|
|
|
# This is a special purpose function optimized for
|
|
|
|
# callers like get_messages_backend().
|
|
|
|
fields = [
|
2021-02-12 08:20:45 +01:00
|
|
|
"id",
|
2018-11-08 15:33:44 +01:00
|
|
|
DB_TOPIC_NAME,
|
2021-02-12 08:20:45 +01:00
|
|
|
"date_sent",
|
|
|
|
"last_edit_time",
|
|
|
|
"edit_history",
|
|
|
|
"content",
|
|
|
|
"rendered_content",
|
|
|
|
"rendered_content_version",
|
|
|
|
"recipient_id",
|
|
|
|
"recipient__type",
|
|
|
|
"recipient__type_id",
|
|
|
|
"sender_id",
|
|
|
|
"sending_client__name",
|
|
|
|
"sender__realm_id",
|
2017-10-14 16:14:54 +02:00
|
|
|
]
|
|
|
|
messages = Message.objects.filter(id__in=needed_ids).values(*fields)
|
2020-05-19 10:20:08 +02:00
|
|
|
return MessageDict.sew_submessages_and_reactions_to_msgs(messages)
|
2017-10-14 16:14:54 +02:00
|
|
|
|
2016-10-04 15:52:26 +02:00
|
|
|
    @staticmethod
    def build_dict_from_raw_db_row(row: Dict[str, Any]) -> Dict[str, Any]:
        """Convert one raw DB row into a wide message dict.

        `row` is a row from a .values() call (with submessages/reactions
        sewn in), and it needs to have all the relevant fields populated.
        """
        return MessageDict.build_message_dict(
            message_id=row["id"],
            last_edit_time=row["last_edit_time"],
            edit_history_json=row["edit_history"],
            content=row["content"],
            topic_name=row[DB_TOPIC_NAME],
            date_sent=row["date_sent"],
            rendered_content=row["rendered_content"],
            rendered_content_version=row["rendered_content_version"],
            sender_id=row["sender_id"],
            sender_realm_id=row["sender__realm_id"],
            sending_client_name=row["sending_client__name"],
            # Rows built from Message objects carry an explicit
            # rendering_realm_id; .values() rows fall back to the
            # sender's realm.
            rendering_realm_id=row.get("rendering_realm_id", row["sender__realm_id"]),
            recipient_id=row["recipient_id"],
            recipient_type=row["recipient__type"],
            recipient_type_id=row["recipient__type_id"],
            reactions=row["reactions"],
            submessages=row["submessages"],
        )
|
|
|
|
|
|
|
|
    @staticmethod
    def build_message_dict(
        message_id: int,
        last_edit_time: Optional[datetime.datetime],
        edit_history_json: Optional[str],
        content: str,
        topic_name: str,
        date_sent: datetime.datetime,
        rendered_content: Optional[str],
        rendered_content_version: Optional[int],
        sender_id: int,
        sender_realm_id: int,
        sending_client_name: str,
        rendering_realm_id: int,
        recipient_id: int,
        recipient_type: int,
        recipient_type_id: int,
        reactions: List[RawReactionRow],
        submessages: List[Dict[str, Any]],
    ) -> Dict[str, Any]:
        """Assemble the wide message dict from individual message fields.

        Fields are passed individually (rather than as ORM objects) so
        callers can feed this from cheap .values() query rows.  As a
        side effect, messages whose cached rendering is stale are
        re-rendered and the new rendered content is persisted to the
        database (see the comment on the need_to_render_content branch).
        """
        obj = dict(
            id=message_id,
            sender_id=sender_id,
            content=content,
            recipient_type_id=recipient_type_id,
            recipient_type=recipient_type,
            recipient_id=recipient_id,
            timestamp=datetime_to_timestamp(date_sent),
            client=sending_client_name,
        )

        obj[TOPIC_NAME] = topic_name
        obj["sender_realm_id"] = sender_realm_id

        # Render topic_links with the stream's realm instead of the
        # sender's realm; this is important for messages sent by
        # cross-realm bots like NOTIFICATION_BOT.
        obj[TOPIC_LINKS] = topic_links(rendering_realm_id, topic_name)

        if last_edit_time is not None:
            obj["last_edit_timestamp"] = datetime_to_timestamp(last_edit_time)
            # An edited message always has an edit history blob.
            assert edit_history_json is not None
            edit_history: List[EditHistoryEvent] = orjson.loads(edit_history_json)
            obj["edit_history"] = edit_history

        if Message.need_to_render_content(
            rendered_content, rendered_content_version, markdown_version
        ):
            # We really shouldn't be rendering objects in this method, but there is
            # a scenario where we upgrade the version of Markdown and fail to run
            # management commands to re-render historical messages, and then we
            # need to have side effects. This method is optimized to not need full
            # blown ORM objects, but the Markdown renderer is unfortunately highly
            # coupled to Message, and we also need to persist the new rendered content.
            # If we don't have a message object passed in, we get one here. The cost
            # of going to the DB here should be overshadowed by the cost of rendering
            # and updating the row.
            # TODO: see #1379 to eliminate Markdown dependencies
            message = Message.objects.select_related().get(id=message_id)

            assert message is not None  # Hint for mypy.
            # It's unfortunate that we need to have side effects on the message
            # in some cases.
            rendered_content = save_message_rendered_content(message, content)

        if rendered_content is not None:
            obj["rendered_content"] = rendered_content
        else:
            # Rendering failed even after the re-render attempt above;
            # fall back to a placeholder rather than sending raw content.
            obj["rendered_content"] = (
                "<p>[Zulip note: Sorry, we could not "
                + "understand the formatting of your message]</p>"
            )

        if rendered_content is not None:
            obj["is_me_message"] = Message.is_status_message(content, rendered_content)
        else:
            obj["is_me_message"] = False

        obj["reactions"] = [
            ReactionDict.build_dict_from_raw_db_row(reaction) for reaction in reactions
        ]
        obj["submessages"] = submessages
        return obj
|
|
|
|
|
2017-10-10 20:10:57 +02:00
|
|
|
@staticmethod
|
2017-11-05 11:15:10 +01:00
|
|
|
def bulk_hydrate_sender_info(objs: List[Dict[str, Any]]) -> None:
|
2017-10-10 20:10:57 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
sender_ids = list({obj["sender_id"] for obj in objs})
|
2017-10-10 20:10:57 +02:00
|
|
|
|
|
|
|
if not sender_ids:
|
|
|
|
return
|
|
|
|
|
|
|
|
query = UserProfile.objects.values(
|
2021-02-12 08:20:45 +01:00
|
|
|
"id",
|
|
|
|
"full_name",
|
|
|
|
"delivery_email",
|
|
|
|
"email",
|
|
|
|
"realm__string_id",
|
|
|
|
"avatar_source",
|
|
|
|
"avatar_version",
|
|
|
|
"is_mirror_dummy",
|
2017-10-10 20:10:57 +02:00
|
|
|
)
|
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
rows = query_for_ids(query, sender_ids, "zerver_userprofile.id")
|
2017-10-10 20:10:57 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
sender_dict = {row["id"]: row for row in rows}
|
2017-10-10 20:10:57 +02:00
|
|
|
|
|
|
|
for obj in objs:
|
2021-02-12 08:20:45 +01:00
|
|
|
sender_id = obj["sender_id"]
|
2017-10-10 20:10:57 +02:00
|
|
|
user_row = sender_dict[sender_id]
|
2021-02-12 08:20:45 +01:00
|
|
|
obj["sender_full_name"] = user_row["full_name"]
|
|
|
|
obj["sender_email"] = user_row["email"]
|
|
|
|
obj["sender_delivery_email"] = user_row["delivery_email"]
|
|
|
|
obj["sender_realm_str"] = user_row["realm__string_id"]
|
|
|
|
obj["sender_avatar_source"] = user_row["avatar_source"]
|
|
|
|
obj["sender_avatar_version"] = user_row["avatar_version"]
|
|
|
|
obj["sender_is_mirror_dummy"] = user_row["is_mirror_dummy"]
|
2017-10-10 20:10:57 +02:00
|
|
|
|
2017-10-10 08:12:03 +02:00
|
|
|
@staticmethod
|
2019-08-18 00:40:35 +02:00
|
|
|
def hydrate_recipient_info(obj: Dict[str, Any], display_recipient: DisplayRecipientT) -> None:
|
2021-02-12 08:19:30 +01:00
|
|
|
"""
|
2017-10-10 08:12:03 +02:00
|
|
|
This method hyrdrates recipient info with things
|
|
|
|
like full names and emails of senders. Eventually
|
|
|
|
our clients should be able to hyrdrate these fields
|
|
|
|
themselves with info they already have on users.
|
2021-02-12 08:19:30 +01:00
|
|
|
"""
|
2017-10-10 08:12:03 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
recipient_type = obj["recipient_type"]
|
|
|
|
recipient_type_id = obj["recipient_type_id"]
|
|
|
|
sender_is_mirror_dummy = obj["sender_is_mirror_dummy"]
|
|
|
|
sender_email = obj["sender_email"]
|
|
|
|
sender_full_name = obj["sender_full_name"]
|
|
|
|
sender_id = obj["sender_id"]
|
2017-10-10 08:12:03 +02:00
|
|
|
|
|
|
|
if recipient_type == Recipient.STREAM:
|
|
|
|
display_type = "stream"
|
|
|
|
elif recipient_type in (Recipient.HUDDLE, Recipient.PERSONAL):
|
2018-05-11 01:40:23 +02:00
|
|
|
assert not isinstance(display_recipient, str)
|
2017-10-10 08:12:03 +02:00
|
|
|
display_type = "private"
|
|
|
|
if len(display_recipient) == 1:
|
|
|
|
# add the sender in if this isn't a message between
|
|
|
|
# someone and themself, preserving ordering
|
python: Convert assignment type annotations to Python 3.6 style.
This commit was split by tabbott; this piece covers the vast majority
of files in Zulip, but excludes scripts/, tools/, and puppet/ to help
ensure we at least show the right error messages for Xenial systems.
We can likely further refine the remaining pieces with some testing.
Generated by com2ann, with whitespace fixes and various manual fixes
for runtime issues:
- invoiced_through: Optional[LicenseLedger] = models.ForeignKey(
+ invoiced_through: Optional["LicenseLedger"] = models.ForeignKey(
-_apns_client: Optional[APNsClient] = None
+_apns_client: Optional["APNsClient"] = None
- notifications_stream: Optional[Stream] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
- signup_notifications_stream: Optional[Stream] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
+ notifications_stream: Optional["Stream"] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
+ signup_notifications_stream: Optional["Stream"] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
- author: Optional[UserProfile] = models.ForeignKey('UserProfile', blank=True, null=True, on_delete=CASCADE)
+ author: Optional["UserProfile"] = models.ForeignKey('UserProfile', blank=True, null=True, on_delete=CASCADE)
- bot_owner: Optional[UserProfile] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
+ bot_owner: Optional["UserProfile"] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
- default_sending_stream: Optional[Stream] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
- default_events_register_stream: Optional[Stream] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
+ default_sending_stream: Optional["Stream"] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
+ default_events_register_stream: Optional["Stream"] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
-descriptors_by_handler_id: Dict[int, ClientDescriptor] = {}
+descriptors_by_handler_id: Dict[int, "ClientDescriptor"] = {}
-worker_classes: Dict[str, Type[QueueProcessingWorker]] = {}
-queues: Dict[str, Dict[str, Type[QueueProcessingWorker]]] = {}
+worker_classes: Dict[str, Type["QueueProcessingWorker"]] = {}
+queues: Dict[str, Dict[str, Type["QueueProcessingWorker"]]] = {}
-AUTH_LDAP_REVERSE_EMAIL_SEARCH: Optional[LDAPSearch] = None
+AUTH_LDAP_REVERSE_EMAIL_SEARCH: Optional["LDAPSearch"] = None
Signed-off-by: Anders Kaseorg <anders@zulipchat.com>
2020-04-22 01:09:50 +02:00
|
|
|
recip: UserDisplayRecipient = {
|
2021-02-12 08:20:45 +01:00
|
|
|
"email": sender_email,
|
|
|
|
"full_name": sender_full_name,
|
|
|
|
"id": sender_id,
|
|
|
|
"is_mirror_dummy": sender_is_mirror_dummy,
|
python: Convert assignment type annotations to Python 3.6 style.
This commit was split by tabbott; this piece covers the vast majority
of files in Zulip, but excludes scripts/, tools/, and puppet/ to help
ensure we at least show the right error messages for Xenial systems.
We can likely further refine the remaining pieces with some testing.
Generated by com2ann, with whitespace fixes and various manual fixes
for runtime issues:
- invoiced_through: Optional[LicenseLedger] = models.ForeignKey(
+ invoiced_through: Optional["LicenseLedger"] = models.ForeignKey(
-_apns_client: Optional[APNsClient] = None
+_apns_client: Optional["APNsClient"] = None
- notifications_stream: Optional[Stream] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
- signup_notifications_stream: Optional[Stream] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
+ notifications_stream: Optional["Stream"] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
+ signup_notifications_stream: Optional["Stream"] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
- author: Optional[UserProfile] = models.ForeignKey('UserProfile', blank=True, null=True, on_delete=CASCADE)
+ author: Optional["UserProfile"] = models.ForeignKey('UserProfile', blank=True, null=True, on_delete=CASCADE)
- bot_owner: Optional[UserProfile] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
+ bot_owner: Optional["UserProfile"] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
- default_sending_stream: Optional[Stream] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
- default_events_register_stream: Optional[Stream] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
+ default_sending_stream: Optional["Stream"] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
+ default_events_register_stream: Optional["Stream"] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
-descriptors_by_handler_id: Dict[int, ClientDescriptor] = {}
+descriptors_by_handler_id: Dict[int, "ClientDescriptor"] = {}
-worker_classes: Dict[str, Type[QueueProcessingWorker]] = {}
-queues: Dict[str, Dict[str, Type[QueueProcessingWorker]]] = {}
+worker_classes: Dict[str, Type["QueueProcessingWorker"]] = {}
+queues: Dict[str, Dict[str, Type["QueueProcessingWorker"]]] = {}
-AUTH_LDAP_REVERSE_EMAIL_SEARCH: Optional[LDAPSearch] = None
+AUTH_LDAP_REVERSE_EMAIL_SEARCH: Optional["LDAPSearch"] = None
Signed-off-by: Anders Kaseorg <anders@zulipchat.com>
2020-04-22 01:09:50 +02:00
|
|
|
}
|
2021-02-12 08:20:45 +01:00
|
|
|
if recip["email"] < display_recipient[0]["email"]:
|
2017-10-10 08:12:03 +02:00
|
|
|
display_recipient = [recip, display_recipient[0]]
|
2021-02-12 08:20:45 +01:00
|
|
|
elif recip["email"] > display_recipient[0]["email"]:
|
2017-10-10 08:12:03 +02:00
|
|
|
display_recipient = [display_recipient[0], recip]
|
|
|
|
else:
|
2020-06-10 06:41:04 +02:00
|
|
|
raise AssertionError(f"Invalid recipient type {recipient_type}")
|
2017-10-10 08:12:03 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
obj["display_recipient"] = display_recipient
|
|
|
|
obj["type"] = display_type
|
|
|
|
if obj["type"] == "stream":
|
|
|
|
obj["stream_id"] = recipient_type_id
|
2016-12-06 07:19:34 +01:00
|
|
|
|
2019-08-07 00:18:13 +02:00
|
|
|
@staticmethod
|
|
|
|
def bulk_hydrate_recipient_info(objs: List[Dict[str, Any]]) -> None:
|
2020-04-09 21:51:58 +02:00
|
|
|
recipient_tuples = { # We use set to eliminate duplicate tuples.
|
2019-08-07 00:18:13 +02:00
|
|
|
(
|
2021-02-12 08:20:45 +01:00
|
|
|
obj["recipient_id"],
|
|
|
|
obj["recipient_type"],
|
|
|
|
obj["recipient_type_id"],
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
|
|
|
for obj in objs
|
2020-04-09 21:51:58 +02:00
|
|
|
}
|
2019-08-07 00:18:13 +02:00
|
|
|
display_recipients = bulk_fetch_display_recipients(recipient_tuples)
|
|
|
|
|
|
|
|
for obj in objs:
|
2021-02-12 08:20:45 +01:00
|
|
|
MessageDict.hydrate_recipient_info(obj, display_recipients[obj["recipient_id"]])
|
2019-08-07 00:18:13 +02:00
|
|
|
|
2017-10-14 02:01:20 +02:00
|
|
|
@staticmethod
|
2017-11-05 11:15:10 +01:00
|
|
|
def set_sender_avatar(obj: Dict[str, Any], client_gravatar: bool) -> None:
|
2021-02-12 08:20:45 +01:00
|
|
|
sender_id = obj["sender_id"]
|
|
|
|
sender_realm_id = obj["sender_realm_id"]
|
|
|
|
sender_delivery_email = obj["sender_delivery_email"]
|
|
|
|
sender_avatar_source = obj["sender_avatar_source"]
|
|
|
|
sender_avatar_version = obj["sender_avatar_version"]
|
2017-10-14 02:01:20 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
obj["avatar_url"] = get_avatar_field(
|
2017-10-14 02:01:20 +02:00
|
|
|
user_id=sender_id,
|
|
|
|
realm_id=sender_realm_id,
|
2019-11-05 20:23:58 +01:00
|
|
|
email=sender_delivery_email,
|
2017-10-14 02:01:20 +02:00
|
|
|
avatar_source=sender_avatar_source,
|
|
|
|
avatar_version=sender_avatar_version,
|
|
|
|
medium=False,
|
|
|
|
client_gravatar=client_gravatar,
|
|
|
|
)
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2017-11-05 11:37:41 +01:00
|
|
|
class ReactionDict:
    @staticmethod
    def build_dict_from_raw_db_row(row: RawReactionRow) -> Dict[str, Any]:
        """Convert one reaction .values() row into the API dictionary
        format used in message payloads."""
        reaction: Dict[str, Any] = {
            "emoji_name": row["emoji_name"],
            "emoji_code": row["emoji_code"],
            "reaction_type": row["reaction_type"],
        }
        # TODO: We plan to remove this redundant user dictionary once
        # clients are updated to support accessing the user_id field
        # directly.  See https://github.com/zulip/zulip/pull/14711 for
        # details.
        #
        # When we do that, we can likely update the `.values()` query to
        # not fetch the extra user_profile__* fields from the database
        # as a small performance optimization.
        reaction["user"] = {
            "email": row["user_profile__email"],
            "id": row["user_profile_id"],
            "full_name": row["user_profile__full_name"],
        }
        reaction["user_id"] = row["user_profile_id"]
        return reaction
|
|
|
|
|
|
|
|
|
|
|
|
def access_message(
    user_profile: UserProfile,
    message_id: int,
    lock_message: bool = False,
) -> Tuple[Message, Optional[UserMessage]]:
    """Fetch a message by ID on behalf of user_profile, enforcing access rules.

    You can access a message by ID in our APIs that either:
    (1) You received or have previously accessed via starring
        (aka have a UserMessage row for).
    (2) Was sent to a public stream in your realm.

    We produce consistent, boring error messages to avoid leaking any
    information from a security perspective.

    The lock_message parameter should be passed by callers that are
    planning to modify the Message object.  This will use the SQL
    `SELECT FOR UPDATE` feature to ensure that other processes cannot
    delete the message during the current transaction, which is
    important to prevent rare race conditions.  Callers must only
    pass lock_message when inside a @transaction.atomic block.
    """
    query = Message.objects.select_related()
    if lock_message:
        # Lock only the `Message` row itself, and not the related rows,
        # because only the `Message` row has a possibility of races.
        query = query.select_for_update(of=("self",))
    try:
        message = query.get(id=message_id)
    except Message.DoesNotExist:
        raise JsonableError(_("Invalid message(s)"))

    user_message = get_usermessage_by_message_id(user_profile, message_id)

    if not has_message_access(user_profile, message, has_user_message=user_message is not None):
        raise JsonableError(_("Invalid message(s)"))
    return (message, user_message)
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2021-09-20 10:34:10 +02:00
|
|
|
def access_web_public_message(
    realm: Realm,
    message_id: int,
) -> Message:
    """Access control method for unauthenticated requests interacting
    with a message in web-public streams.
    """
    # Every failure in this code path raises MissingAuthenticationError,
    # so we never leak whether a message with the provided ID exists on
    # the server when the client shouldn't have access to it.
    if not realm.web_public_streams_enabled():
        raise MissingAuthenticationError()

    try:
        message = Message.objects.select_related().get(id=message_id)
    except Message.DoesNotExist:
        raise MissingAuthenticationError()

    if not message.is_stream_message():
        raise MissingAuthenticationError()

    try:
        stream = get_web_public_streams_queryset(realm).get(id=message.recipient.type_id)
    except Stream.DoesNotExist:
        raise MissingAuthenticationError()

    # Sanity-check the invariants that get_web_public_streams_queryset
    # should already have enforced.
    assert stream.is_web_public
    assert not stream.deactivated
    assert not stream.invite_only
    assert stream.history_public_to_subscribers

    # We've confirmed this message was sent to the target web-public
    # stream, so it can be returned as successfully accessed.
    return message
|
|
|
|
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
def has_message_access(
    user_profile: UserProfile,
    message: Message,
    *,
    has_user_message: bool,
    stream: Optional[Stream] = None,
    is_subscribed: Optional[bool] = None,
) -> bool:
    """
    Returns whether a user has access to a given message.

    * has_user_message must be True if the user has a UserMessage row
      for the target message.
    * The optional stream parameter is validated; is_subscribed is not.
    """
    # Having a UserMessage row always grants access.
    if has_user_message:
        return True

    # Private messages are only accessible to users who received them.
    if message.recipient.type != Recipient.STREAM:
        return False

    if stream is None:
        stream = Stream.objects.get(id=message.recipient.type_id)
    else:
        # Callers passing a stream must pass the message's own stream.
        assert stream.recipient_id == message.recipient_id

    # Public stream messages in other realms are not accessible.
    if stream.realm != user_profile.realm:
        return False

    # Without public history, you can only access messages you
    # directly received (handled by the UserMessage check above).
    if not stream.is_history_public_to_subscribers():
        return False

    if stream.is_public() and user_profile.can_access_public_streams():
        return True

    # History is public to subscribers, so the remaining question is
    # whether the user is subscribed.
    if is_subscribed is not None:
        return is_subscribed

    return Subscription.objects.filter(
        user_profile=user_profile, active=True, recipient=message.recipient
    ).exists()
|
2016-10-12 02:14:08 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2021-05-12 23:21:39 +02:00
|
|
|
def bulk_access_messages(
    user_profile: UserProfile, messages: Collection[Message], *, stream: Optional[Stream] = None
) -> List[Message]:
    """This function does the full has_message_access check for each
    message.  If stream is provided, it is used to avoid unnecessary
    database queries, and will use exactly 2 bulk queries instead.

    Throws AssertionError if stream is passed and any of the messages
    were not sent to that stream.
    """
    user_message_ids = set(
        bulk_access_messages_expect_usermessage(
            user_profile.id, [message.id for message in messages]
        )
    )

    # TODO: Ideally, we'd do a similar bulk fetch of the streams when
    # stream is None, to avoid has_message_access doing a per-message
    # stream query in that case.
    subscribed_recipient_ids = set(get_subscribed_stream_recipient_ids_for_user(user_profile))

    return [
        message
        for message in messages
        if has_message_access(
            user_profile,
            message,
            has_user_message=message.id in user_message_ids,
            stream=stream,
            is_subscribed=message.recipient_id in subscribed_recipient_ids,
        )
    ]
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2019-02-14 02:01:42 +01:00
|
|
|
def bulk_access_messages_expect_usermessage(
    user_profile_id: int, message_ids: Sequence[int]
) -> "ValuesQuerySet[UserMessage, int]":
    """
    Like bulk_access_messages, but faster and potentially stricter.

    Returns a subset of `message_ids` containing only messages the
    user can access.  Makes O(1) database queries.

    Use this function only when the user is expected to have a
    UserMessage row for every message in `message_ids`.  If a
    UserMessage row is missing, the message will be omitted even if
    the user has access (e.g. because it went to a public stream.)

    See also: `access_message`, `bulk_access_messages`.
    """
    query = UserMessage.objects.filter(
        user_profile_id=user_profile_id,
        message_id__in=message_ids,
    )
    return query.values_list("message_id", flat=True)
|
2019-02-14 02:01:42 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
|
|
|
def render_markdown(
    message: Message,
    content: str,
    realm: Optional[Realm] = None,
    realm_alert_words_automaton: Optional[ahocorasick.Automaton] = None,
    url_embed_data: Optional[Dict[str, Optional[UrlEmbedData]]] = None,
    mention_data: Optional[MentionData] = None,
    email_gateway: bool = False,
) -> MessageRenderingResult:
    """Render message content to HTML via markdown_convert.

    This is a thin wrapper around markdown_convert that derives the
    rendering context (the realm, plus sender flags like is_bot and
    translate_emoticons) from the message before delegating.
    """
    # Fixed: the old docstring claimed this wrapped do_render_markdown,
    # which this code path no longer calls.
    if realm is None:
        realm = message.get_realm()

    sender = message.sender

    return markdown_convert(
        content,
        realm_alert_words_automaton=realm_alert_words_automaton,
        message=message,
        message_realm=realm,
        sent_by_bot=sender.is_bot,
        translate_emoticons=sender.translate_emoticons,
        url_embed_data=url_embed_data,
        mention_data=mention_data,
        email_gateway=email_gateway,
    )
|
2017-05-23 03:02:01 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2017-11-05 11:15:10 +01:00
|
|
|
def huddle_users(recipient_id: int) -> str:
    """Return the huddle's member user IDs as a sorted, comma-joined string."""
    display_recipient: DisplayRecipientT = get_display_recipient_by_id(
        recipient_id,
        Recipient.HUDDLE,
        None,
    )

    # A plain-str display recipient is only possible for streams.
    assert not isinstance(display_recipient, str)

    sorted_user_ids = sorted(obj["id"] for obj in display_recipient)
    return ",".join(map(str, sorted_user_ids))
|
2017-05-23 03:02:01 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2017-11-05 11:15:10 +01:00
|
|
|
def get_inactive_recipient_ids(user_profile: UserProfile) -> List[int]:
    """Return recipient IDs for stream subscriptions the user has deactivated."""
    inactive_rows = (
        get_stream_subscriptions_for_user(user_profile)
        .filter(active=False)
        .values("recipient_id")
    )
    return [row["recipient_id"] for row in inactive_rows]
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2017-11-05 11:15:10 +01:00
|
|
|
def get_muted_stream_ids(user_profile: UserProfile) -> List[int]:
    """Return stream IDs for active subscriptions the user has muted."""
    muted_rows = (
        get_stream_subscriptions_for_user(user_profile)
        .filter(active=True, is_muted=True)
        .values("recipient__type_id")
    )
    return [row["recipient__type_id"] for row in muted_rows]
|
2017-08-23 22:45:50 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2018-08-14 23:57:20 +02:00
|
|
|
def get_starred_message_ids(user_profile: UserProfile) -> List[int]:
    """Return the user's starred message IDs, ascending, capped at 10000."""
    starred_query = (
        UserMessage.objects.filter(user_profile=user_profile)
        .extra(where=[UserMessage.where_starred()])
        .order_by("message_id")
        .values_list("message_id", flat=True)
    )
    return list(starred_query[0:10000])
|
|
|
|
|
2018-08-14 23:57:20 +02:00
|
|
|
|
2021-06-09 13:31:39 +02:00
|
|
|
def get_raw_unread_data(
    user_profile: UserProfile, message_ids: Optional[List[int]] = None
) -> RawUnreadMessagesResult:
    """Fetch raw unread-message state for the user.

    If message_ids is given, restrict the query to those IDs (callers
    use this when marking a few known-unread messages); otherwise
    fetch all unread messages.  Results are capped at
    MAX_UNREAD_MESSAGES for performance, keeping the newest messages.
    """
    excluded_recipient_ids = get_inactive_recipient_ids(user_profile)

    query = (
        UserMessage.objects.filter(user_profile=user_profile)
        .exclude(message__recipient_id__in=excluded_recipient_ids)
        .values(
            "message_id",
            "message__sender_id",
            MESSAGE__TOPIC,
            "message__recipient_id",
            "message__recipient__type",
            "message__recipient__type_id",
            "flags",
        )
        .order_by("-message_id")
    )

    if message_ids is None:
        # At page load we need all unread messages.
        query = query.extra(
            where=[UserMessage.where_unread()],
        )
    else:
        # When users are marking just a few messages as unread, we just need
        # those ids, and we know they're unread.
        query = query.filter(message_id__in=message_ids)

    # Limit unread messages for performance reasons; the descending
    # order_by above means the cap keeps the newest messages.
    newest_first = list(query[:MAX_UNREAD_MESSAGES])

    rows = list(reversed(newest_first))
    return extract_unread_data_from_um_rows(rows, user_profile)
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-09-27 19:12:24 +02:00
|
|
|
def extract_unread_data_from_um_rows(
    rows: List[Dict[str, Any]], user_profile: Optional[UserProfile]
) -> RawUnreadMessagesResult:
    """Build a RawUnreadMessagesResult from UserMessage ``.values()`` rows.

    ``rows`` are dicts with the keys selected in get_raw_unread_data's
    query ("message_id", "message__recipient__type", "flags", ...).
    If ``user_profile`` is None, an empty result is returned.
    """

    pm_dict: Dict[int, RawUnreadPrivateMessageDict] = {}
    stream_dict: Dict[int, RawUnreadStreamDict] = {}
    unmuted_stream_msgs: Set[int] = set()
    huddle_dict: Dict[int, RawUnreadHuddleDict] = {}
    mentions: Set[int] = set()
    total_unreads = 0

    # The result aliases the containers above; muted_stream_ids and
    # old_unreads_missing are filled in below once we know
    # user_profile is not None.
    raw_unread_messages: RawUnreadMessagesResult = dict(
        pm_dict=pm_dict,
        stream_dict=stream_dict,
        muted_stream_ids=[],
        unmuted_stream_msgs=unmuted_stream_msgs,
        huddle_dict=huddle_dict,
        mentions=mentions,
        old_unreads_missing=False,
    )

    if user_profile is None:
        return raw_unread_messages

    muted_stream_ids = get_muted_stream_ids(user_profile)
    raw_unread_messages["muted_stream_ids"] = muted_stream_ids

    topic_mute_checker = build_topic_mute_checker(user_profile)

    def is_row_muted(stream_id: int, recipient_id: int, topic: str) -> bool:
        """Return True if the message is muted at the stream or topic level."""
        if stream_id in muted_stream_ids:
            return True

        if topic_mute_checker(recipient_id, topic):
            return True

        # Messages sent by muted users are never unread, so we don't
        # need any logic related to muted users here.

        return False

    huddle_cache: Dict[int, str] = {}

    def get_huddle_users(recipient_id: int) -> str:
        """Memoized wrapper around huddle_users (one lookup per recipient)."""
        if recipient_id in huddle_cache:
            return huddle_cache[recipient_id]

        user_ids_string = huddle_users(recipient_id)
        huddle_cache[recipient_id] = user_ids_string
        return user_ids_string

    for row in rows:
        total_unreads += 1
        message_id = row["message_id"]
        msg_type = row["message__recipient__type"]
        recipient_id = row["message__recipient_id"]
        sender_id = row["message__sender_id"]

        if msg_type == Recipient.STREAM:
            stream_id = row["message__recipient__type_id"]
            topic = row[MESSAGE__TOPIC]
            stream_dict[message_id] = dict(
                stream_id=stream_id,
                topic=topic,
            )
            if not is_row_muted(stream_id, recipient_id, topic):
                unmuted_stream_msgs.add(message_id)

        elif msg_type == Recipient.PERSONAL:
            # For messages we sent, the "other" user is the recipient;
            # otherwise it's the sender.
            if sender_id == user_profile.id:
                other_user_id = row["message__recipient__type_id"]
            else:
                other_user_id = sender_id

            pm_dict[message_id] = dict(
                other_user_id=other_user_id,
            )

        elif msg_type == Recipient.HUDDLE:
            user_ids_string = get_huddle_users(recipient_id)
            huddle_dict[message_id] = dict(
                user_ids_string=user_ids_string,
            )

        # TODO: Add support for alert words here as well.
        is_mentioned = (row["flags"] & UserMessage.flags.mentioned) != 0
        is_wildcard_mentioned = (row["flags"] & UserMessage.flags.wildcard_mentioned) != 0
        if is_mentioned:
            mentions.add(message_id)
        if is_wildcard_mentioned:
            # Wildcard mentions in muted stream rows are ignored.
            if msg_type == Recipient.STREAM:
                stream_id = row["message__recipient__type_id"]
                topic = row[MESSAGE__TOPIC]
                if not is_row_muted(stream_id, recipient_id, topic):
                    mentions.add(message_id)
            else:  # nocoverage # TODO: Test wildcard mentions in PMs.
                mentions.add(message_id)

    # Record whether the user had more than MAX_UNREAD_MESSAGES total
    # unreads -- that's a state where Zulip's behavior will start to
    # be erroneous, and clients should display a warning.
    raw_unread_messages["old_unreads_missing"] = total_unreads == MAX_UNREAD_MESSAGES

    return raw_unread_messages
|
2017-10-04 18:13:04 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2022-03-07 15:12:24 +01:00
|
|
|
def aggregate_streams(*, input_dict: Dict[int, RawUnreadStreamDict]) -> List[UnreadStreamInfo]:
    """Group unread stream message IDs into per-(stream, topic) buckets.

    Each bucket's message IDs are sorted ascending, and buckets are
    returned in (stream_id, topic) order.
    """
    buckets: Dict[Tuple[int, str], UnreadStreamInfo] = {}
    for message_id, attribute_dict in input_dict.items():
        stream_id = attribute_dict["stream_id"]
        topic = attribute_dict["topic"]
        bucket_key = (stream_id, topic)
        if bucket_key not in buckets:
            buckets[bucket_key] = UnreadStreamInfo(
                stream_id=stream_id,
                topic=topic,
                unread_message_ids=[],
            )
        buckets[bucket_key]["unread_message_ids"].append(message_id)

    for info in buckets.values():
        info["unread_message_ids"].sort()

    return [buckets[bucket_key] for bucket_key in sorted(buckets.keys())]
|
|
|
|
|
|
|
|
|
|
|
|
def aggregate_pms(
    *, input_dict: Dict[int, RawUnreadPrivateMessageDict]
) -> List[UnreadPrivateMessageInfo]:
    """Group unread private-message IDs into per-other-user buckets.

    Each bucket's message IDs are sorted ascending, and buckets are
    returned in other_user_id order.
    """
    buckets: Dict[int, UnreadPrivateMessageInfo] = {}
    for message_id, attribute_dict in input_dict.items():
        other_user_id = attribute_dict["other_user_id"]
        if other_user_id not in buckets:
            # The `sender_id` field here is only supported for
            # legacy mobile clients. Its actual semantics are the same
            # as `other_user_id`.
            buckets[other_user_id] = UnreadPrivateMessageInfo(
                other_user_id=other_user_id,
                sender_id=other_user_id,
                unread_message_ids=[],
            )
        buckets[other_user_id]["unread_message_ids"].append(message_id)

    for info in buckets.values():
        info["unread_message_ids"].sort()

    return [buckets[other_user_id] for other_user_id in sorted(buckets.keys())]
|
|
|
|
|
|
|
|
|
|
|
|
def aggregate_huddles(*, input_dict: Dict[int, RawUnreadHuddleDict]) -> List[UnreadHuddleInfo]:
    """Group unread huddle message IDs into per-huddle buckets.

    Huddles are keyed by their comma-joined user-ID string; each
    bucket's message IDs are sorted ascending, and buckets are
    returned in key order.
    """
    buckets: Dict[str, UnreadHuddleInfo] = {}
    for message_id, attribute_dict in input_dict.items():
        user_ids_string = attribute_dict["user_ids_string"]
        if user_ids_string not in buckets:
            buckets[user_ids_string] = UnreadHuddleInfo(
                user_ids_string=user_ids_string,
                unread_message_ids=[],
            )
        buckets[user_ids_string]["unread_message_ids"].append(message_id)

    for info in buckets.values():
        info["unread_message_ids"].sort()

    return [buckets[user_ids_string] for user_ids_string in sorted(buckets.keys())]
|
|
|
|
|
|
|
|
|
2017-11-05 11:15:10 +01:00
|
|
|
def aggregate_unread_data(raw_data: RawUnreadMessagesResult) -> UnreadMessagesResult:
    """Convert raw unread state into the aggregated structure sent to clients."""
    pm_dict = raw_data["pm_dict"]
    stream_dict = raw_data["stream_dict"]
    unmuted_stream_msgs = raw_data["unmuted_stream_msgs"]
    huddle_dict = raw_data["huddle_dict"]

    # Muted stream messages are excluded from the count (only
    # unmuted_stream_msgs is counted, not all of stream_dict).
    count = len(pm_dict) + len(unmuted_stream_msgs) + len(huddle_dict)

    result: UnreadMessagesResult = dict(
        pms=aggregate_pms(input_dict=pm_dict),
        streams=aggregate_streams(input_dict=stream_dict),
        huddles=aggregate_huddles(input_dict=huddle_dict),
        mentions=list(raw_data["mentions"]),
        count=count,
        old_unreads_missing=raw_data["old_unreads_missing"],
    )

    return result
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
|
|
|
def apply_unread_message_event(
    user_profile: UserProfile,
    state: RawUnreadMessagesResult,
    message: Dict[str, Any],
    flags: List[str],
) -> None:
    """Update raw unread-message state in place for a new message event.

    ``message`` is the wire-format message dict from the event;
    ``flags`` are this user's message flags for it.  Raises
    AssertionError for an unexpected message type.
    """
    message_id = message["id"]
    if message["type"] == "stream":
        message_type = "stream"
    elif message["type"] == "private":
        # `others` excludes ourself; zero others means a message to
        # ourself, which is treated as a 1:1 private message below.
        others = [recip for recip in message["display_recipient"] if recip["id"] != user_profile.id]
        if len(others) <= 1:
            message_type = "private"
        else:
            message_type = "huddle"
    else:
        raise AssertionError("Invalid message type {}".format(message["type"]))

    if message_type == "stream":
        stream_id = message["stream_id"]
        topic = message[TOPIC_NAME]
        state["stream_dict"][message_id] = RawUnreadStreamDict(
            stream_id=stream_id,
            topic=topic,
        )

        if stream_id not in state["muted_stream_ids"]:
            # This next check hits the database.
            if not topic_is_muted(user_profile, stream_id, topic):
                state["unmuted_stream_msgs"].add(message_id)

    elif message_type == "private":
        if len(others) == 1:
            other_user_id = others[0]["id"]
        else:
            # A message to ourself: the "other" user is us.
            other_user_id = user_profile.id

        state["pm_dict"][message_id] = RawUnreadPrivateMessageDict(
            other_user_id=other_user_id,
        )

    else:
        # Huddles are keyed by the sorted, comma-joined member IDs,
        # matching the format produced by huddle_users().
        display_recipient = message["display_recipient"]
        user_ids = [obj["id"] for obj in display_recipient]
        user_ids = sorted(user_ids)
        user_ids_string = ",".join(str(uid) for uid in user_ids)

        state["huddle_dict"][message_id] = RawUnreadHuddleDict(
            user_ids_string=user_ids_string,
        )

    if "mentioned" in flags:
        state["mentions"].add(message_id)
    if "wildcard_mentioned" in flags:
        # A wildcard mention only counts as a mention when the message
        # is in an unmuted stream conversation.
        if message_id in state["unmuted_stream_msgs"]:
            state["mentions"].add(message_id)
|
2018-01-02 18:33:28 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
|
|
|
def remove_message_id_from_unread_mgs(state: RawUnreadMessagesResult, message_id: int) -> None:
    # The opposite of apply_unread_message_event; removes a read or
    # deleted message from a raw_unread_msgs data structure.
    #
    # A message lives in at most one of the three conversation
    # buckets, but unconditionally popping from all of them (plus the
    # two id sets) is simpler than working out which one applies.
    for bucket in ("pm_dict", "stream_dict", "huddle_dict"):
        state[bucket].pop(message_id, None)
    state["unmuted_stream_msgs"].discard(message_id)
    state["mentions"].discard(message_id)
|
2019-08-03 02:24:00 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2021-06-09 13:31:39 +02:00
|
|
|
def format_unread_message_details(
    my_user_id: int,
    raw_unread_data: RawUnreadMessagesResult,
) -> Dict[str, MessageDetailsDict]:
    """Convert a raw_unread_msgs data structure into the per-message
    details mapping (keyed by stringified message id) sent to clients."""
    mentions = raw_unread_data["mentions"]
    unread_data: Dict[str, MessageDetailsDict] = {}

    def record(message_id: int, details: MessageDetailsDict) -> None:
        # Mentions are tracked in one shared set for all message types.
        if message_id in mentions:
            details["mentioned"] = True
        unread_data[str(message_id)] = details

    for message_id, pm_details in raw_unread_data["pm_dict"].items():
        other_user_id = pm_details["other_user_id"]
        # Note that user_ids excludes ourself, even for the case we
        # send messages to ourself.
        recipient_ids = [] if other_user_id == my_user_id else [other_user_id]
        record(
            message_id,
            MessageDetailsDict(
                type="private",
                user_ids=recipient_ids,
            ),
        )

    for message_id, stream_details in raw_unread_data["stream_dict"].items():
        record(
            message_id,
            MessageDetailsDict(
                type="stream",
                stream_id=stream_details["stream_id"],
                topic=stream_details["topic"],
                # Clients don't need this detail, but we need it
                # internally for apply_events.
                unmuted_stream_msg=message_id in raw_unread_data["unmuted_stream_msgs"],
            ),
        )

    for message_id, huddle_details in raw_unread_data["huddle_dict"].items():
        # The client wants a list of user_ids in the conversation,
        # excluding ourself, sorted in numerical order.
        all_ids = (int(s) for s in huddle_details["user_ids_string"].split(","))
        recipient_ids = sorted(uid for uid in all_ids if uid != my_user_id)
        record(
            message_id,
            MessageDetailsDict(
                type="private",
                user_ids=recipient_ids,
            ),
        )

    return unread_data
|
|
|
|
|
|
|
|
|
|
|
|
def add_message_to_unread_msgs(
    my_user_id: int,
    state: RawUnreadMessagesResult,
    message_id: int,
    message_details: MessageDetailsDict,
) -> None:
    """Insert a single unread message (in MessageDetailsDict form) into
    a raw_unread_msgs data structure."""
    if message_details.get("mentioned"):
        state["mentions"].add(message_id)

    message_type = message_details["type"]
    if message_type == "private":
        # Participants other than ourself.
        other_user_ids = [uid for uid in message_details["user_ids"] if uid != my_user_id]
        if not other_user_ids:
            # A message sent to ourself.
            state["pm_dict"][message_id] = RawUnreadPrivateMessageDict(
                other_user_id=my_user_id,
            )
        elif len(other_user_ids) == 1:
            # An ordinary 1:1 private message.
            state["pm_dict"][message_id] = RawUnreadPrivateMessageDict(
                other_user_id=other_user_ids[0],
            )
        else:
            # A group private message; the huddle key string includes
            # ourself, sorted numerically.
            all_ids = sorted([*other_user_ids, my_user_id])
            state["huddle_dict"][message_id] = RawUnreadHuddleDict(
                user_ids_string=",".join(str(uid) for uid in all_ids),
            )
    elif message_type == "stream":
        state["stream_dict"][message_id] = RawUnreadStreamDict(
            stream_id=message_details["stream_id"],
            topic=message_details["topic"],
        )
        if message_details["unmuted_stream_msg"]:
            state["unmuted_stream_msgs"].add(message_id)
|
|
|
|
|
|
|
|
|
2018-01-22 21:50:22 +01:00
|
|
|
def estimate_recent_messages(realm: Realm, hours: int) -> int:
    """Estimate how many messages were sent in `realm` in the last
    `hours` hours, using the hourly analytics counts."""
    stat = COUNT_STATS["messages_sent:is_bot:hour"]
    cutoff = timezone_now() - datetime.timedelta(hours=hours)
    total = RealmCount.objects.filter(
        property=stat.property, end_time__gt=cutoff, realm=realm
    ).aggregate(Sum("value"))["value__sum"]
    # The aggregate is None when there are no matching rows at all.
    return total or 0
|
|
|
|
|
2018-01-04 13:49:39 +01:00
|
|
|
|
2018-01-22 21:50:22 +01:00
|
|
|
def get_first_visible_message_id(realm: Realm) -> int:
    """Return the id of the oldest message visible in the realm, as
    maintained by update_first_visible_message_id (0 when no limit)."""
    return realm.first_visible_message_id
|
2018-01-22 21:50:22 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2018-01-22 21:50:22 +01:00
|
|
|
def maybe_update_first_visible_message_id(realm: Realm, lookback_hours: int) -> None:
    """Recompute realm.first_visible_message_id, but only for realms
    with a message_visibility_limit that had recent traffic."""
    recent_traffic = estimate_recent_messages(realm, lookback_hours)
    if realm.message_visibility_limit is not None and recent_traffic > 0:
        update_first_visible_message_id(realm)
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2018-01-22 21:50:22 +01:00
|
|
|
def update_first_visible_message_id(realm: Realm) -> None:
    """Recompute and persist realm.first_visible_message_id from the
    realm's message_visibility_limit setting."""
    if realm.message_visibility_limit is None:
        # No limit: every message is visible.
        first_visible_message_id = 0
    else:
        try:
            # The id of the Nth-newest message sent within the realm.
            first_visible_message_id = (
                Message.objects.filter(sender__realm=realm)
                .values("id")
                .order_by("-id")[realm.message_visibility_limit - 1]["id"]
            )
        except IndexError:
            # Fewer than N messages exist; everything stays visible.
            first_visible_message_id = 0
    realm.first_visible_message_id = first_visible_message_id
    realm.save(update_fields=["first_visible_message_id"])
|
2019-03-20 04:15:58 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-10-13 15:49:40 +02:00
|
|
|
def get_last_message_id() -> int:
    """Return the highest message id in the database, or -1 if there
    are no messages at all.

    We generally use this function to populate RealmAuditLog; the max
    id here is system-wide, not per-realm.  Presumably there's some
    advantage in not filtering by realm.
    """
    last_id = Message.objects.aggregate(Max("id"))["id__max"]
    if last_id is None:
        # During initial realm creation there might be 0 messages in
        # the database; use -1 as "beginning of time".
        return -1
    return last_id
|
2019-03-20 04:15:58 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
|
|
|
def get_recent_conversations_recipient_id(
    user_profile: UserProfile, recipient_id: int, sender_id: int
) -> int:
    """Return the recipient_id under which get_recent_private_conversations
    would have recorded this message in its data structure.

    For 1:1 messages addressed to us, that is the sender's personal
    recipient_id, not our own.
    """
    if recipient_id != user_profile.recipient_id:
        return recipient_id
    # The message was sent to us; look up the sender's recipient_id.
    return UserProfile.objects.values_list("recipient_id", flat=True).get(id=sender_id)
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2019-03-20 04:15:58 +01:00
|
|
|
def get_recent_private_conversations(user_profile: UserProfile) -> Dict[int, Dict[str, Any]]:
    """Return a map from recipient_id to {max_message_id, user_ids} for
    the user's recent private message conversations.

    This uses carefully optimized SQL designed to exploit the
    UserMessage index on private messages.  It is significantly
    complicated by the fact that for 1:1 private messages, we store the
    message against the recipient_id of whichever user was the
    recipient, so for 1:1 messages sent directly to us we must recover
    the other party via sender_id.  The same pattern appears in
    zerver/lib/events.py.

    Ideally we would express this in Django, but even without the
    UNION ALL that seems impossible: the equivalent ORM spelling (for
    the first half of the query) does not properly nest the GROUP BY
    (from .annotate) with the slicing.

    The returned dictionary structure is convenient to modify; it is
    converted into its final form by post_process.
    """
    RECENT_CONVERSATIONS_LIMIT = 1000

    recipient_map = {}
    my_recipient_id = user_profile.recipient_id

    query = SQL(
        """
    SELECT
        subquery.recipient_id, MAX(subquery.message_id)
    FROM (
        (SELECT
            um.message_id AS message_id,
            m.recipient_id AS recipient_id
        FROM
            zerver_usermessage um
        JOIN
            zerver_message m
        ON
            um.message_id = m.id
        WHERE
            um.user_profile_id=%(user_profile_id)s AND
            um.flags & 2048 <> 0 AND
            m.recipient_id <> %(my_recipient_id)s
        ORDER BY message_id DESC
        LIMIT %(conversation_limit)s)
        UNION ALL
        (SELECT
            m.id AS message_id,
            sender_profile.recipient_id AS recipient_id
        FROM
            zerver_message m
        JOIN
            zerver_userprofile sender_profile
        ON
            m.sender_id = sender_profile.id
        WHERE
            m.recipient_id=%(my_recipient_id)s
        ORDER BY message_id DESC
        LIMIT %(conversation_limit)s)
    ) AS subquery
    GROUP BY subquery.recipient_id
    """
    )

    with connection.cursor() as cursor:
        cursor.execute(
            query,
            {
                "user_profile_id": user_profile.id,
                "conversation_limit": RECENT_CONVERSATIONS_LIMIT,
                "my_recipient_id": my_recipient_id,
            },
        )
        rows = cursor.fetchall()

    # Each row is (recipient_id, max_message_id) for a party we've had
    # a recent (possibly group) private conversation with, including
    # PMs with ourself (which yield an empty user_ids list).
    for recipient_id, max_message_id in rows:
        recipient_map[recipient_id] = dict(
            max_message_id=max_message_id,
            user_ids=[],
        )

    # Now map every recipient_id to the other participants' user ids.
    for recipient_id, user_profile_id in (
        Subscription.objects.filter(recipient_id__in=recipient_map.keys())
        .exclude(user_profile_id=user_profile.id)
        .values_list("recipient_id", "user_profile_id")
    ):
        recipient_map[recipient_id]["user_ids"].append(user_profile_id)

    # Sort to prevent test flakes and client bugs.
    for rec in recipient_map.values():
        rec["user_ids"].sort()

    return recipient_map
|
2020-09-11 16:11:06 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2020-09-11 16:11:06 +02:00
|
|
|
def wildcard_mention_allowed(sender: UserProfile, stream: Stream) -> bool:
    """Whether `sender` may use a wildcard mention in `stream`, per the
    realm's wildcard_mention_policy."""
    realm = sender.realm

    # Streams with no more than Realm.WILDCARD_MENTION_THRESHOLD
    # subscribers always allow the mention.  In the future, we may want
    # to make this behavior a default, and also just allow explicitly
    # setting whether this applies to a stream as an override.
    if num_subscribers_for_stream_id(stream.id) <= Realm.WILDCARD_MENTION_THRESHOLD:
        return True

    policy = realm.wildcard_mention_policy
    if policy == Realm.WILDCARD_MENTION_POLICY_NOBODY:
        return False
    if policy == Realm.WILDCARD_MENTION_POLICY_EVERYONE:
        return True
    if policy == Realm.WILDCARD_MENTION_POLICY_ADMINS:
        return sender.is_realm_admin
    if policy == Realm.WILDCARD_MENTION_POLICY_MODERATORS:
        return sender.is_realm_admin or sender.is_moderator
    if policy == Realm.WILDCARD_MENTION_POLICY_FULL_MEMBERS:
        return sender.is_realm_admin or (not sender.is_provisional_member and not sender.is_guest)
    if policy == Realm.WILDCARD_MENTION_POLICY_MEMBERS:
        return not sender.is_guest

    raise AssertionError("Invalid wildcard mention policy")
|
2021-06-14 18:49:28 +02:00
|
|
|
|
|
|
|
|
|
|
|
def parse_message_content_delete_limit(
    value: Union[int, str],
    special_values_map: Mapping[str, Optional[int]],
) -> Optional[int]:
    """Validate a message_content_delete_limit_seconds setting value.

    `value` must be either one of the special string values in
    `special_values_map` (returned as its mapped meaning, possibly None)
    or a positive integer number of seconds, which is returned as-is.

    Raises RequestVariableConversionError for any other input.
    """
    # Membership tests the mapping directly; `.keys()` is redundant.
    if isinstance(value, str) and value in special_values_map:
        return special_values_map[value]
    if isinstance(value, str) or value <= 0:
        raise RequestVariableConversionError("message_content_delete_limit_seconds", value)
    assert isinstance(value, int)
    return value
|
2022-04-14 23:45:46 +02:00
|
|
|
|
|
|
|
|
|
|
|
def update_to_dict_cache(
    changed_messages: List[Message], realm_id: Optional[int] = None
) -> List[int]:
    """Updates the message as stored in the to_dict cache (for serving
    messages).  Returns the ids of the affected messages."""
    changed_messages_to_dict = MessageDict.to_dict_uncached(changed_messages, realm_id)
    items_for_remote_cache = {
        to_dict_cache_key_id(msg_id): (msg,)
        for msg_id, msg in changed_messages_to_dict.items()
    }
    cache_set_many(items_for_remote_cache)
    return list(changed_messages_to_dict.keys())
|