# See http://zulip.readthedocs.io/en/latest/events-system.html for
# high-level documentation on how this system works.
import copy
import ujson

from django.utils.translation import ugettext as _
from django.conf import settings
from importlib import import_module
from six.moves import filter, map
from typing import (
    cast, Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Text, Tuple, Union
)

session_engine = import_module(settings.SESSION_ENGINE)

from zerver.lib.alert_words import user_alert_words
from zerver.lib.attachments import user_attachments
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.integrations import EMBEDDED_BOTS
from zerver.lib.message import (
    aggregate_unread_data,
    apply_unread_message_event,
    get_raw_unread_data,
)
from zerver.lib.narrow import check_supported_events_narrow_filter
from zerver.lib.soft_deactivation import maybe_catch_up_soft_deactivated_user
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.request import JsonableError
from zerver.lib.topic_mutes import get_topic_mutes
from zerver.lib.actions import (
    validate_user_access_to_subscribers_helper,
    do_get_streams, get_default_streams_for_realm,
    gather_subscriptions_helper, get_cross_realm_dicts,
    get_status_dict, streams_to_dicts_sorted
)
from zerver.lib.upload import get_total_uploads_size_for_user
from zerver.tornado.event_queue import request_event_queue, get_user_events
from zerver.models import Client, Message, Realm, UserPresence, UserProfile, \
    get_user_profile_by_id, \
    get_active_user_dicts_in_realm, realm_filters_for_realm, get_user, \
    get_owned_bot_dicts, custom_profile_fields_for_realm, get_realm_domains
from zproject.backends import email_auth_enabled, password_auth_enabled
from version import ZULIP_VERSION


def get_realm_user_dicts(user_profile):
    # type: (UserProfile) -> List[Dict[str, Text]]
    return [{'email': userdict['email'],
             'user_id': userdict['id'],
             'avatar_url': avatar_url_from_dict(userdict),
             'is_admin': userdict['is_realm_admin'],
             'is_bot': userdict['is_bot'],
             'full_name': userdict['full_name'],
             'timezone': userdict['timezone']}
            for userdict in get_active_user_dicts_in_realm(user_profile.realm_id)]
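
# Illustrative shape of one entry returned by get_realm_user_dicts (the keys
# come from the dict literal above; the values here are invented):
#     {'email': 'hamlet@example.com', 'user_id': 10, 'avatar_url': '...',
#      'is_admin': False, 'is_bot': False, 'full_name': 'King Hamlet',
#      'timezone': 'UTC'}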


def always_want(msg_type):
    # type: (str) -> bool
    '''
    This function is used as a helper in fetch_initial_state_data,
    when the user passes in None for event_types, and we want to
    fetch info for every event type.  Defining it at module level
    makes it easier to mock.
    '''
    return True


# Fetch initial data.  When event_types is not specified, clients want
# all event types.  Whenever you add new code to this function, you
# should also add corresponding events for changes in the data
# structures and new code to apply_events (and add a test in
# EventsRegisterTest).
def fetch_initial_state_data(user_profile, event_types, queue_id,
                             include_subscribers=True):
    # type: (UserProfile, Optional[Iterable[str]], str, bool) -> Dict[str, Any]
    state = {'queue_id': queue_id}  # type: Dict[str, Any]

    if event_types is None:
        # always_want returns True for every event type.
        want = always_want  # type: Callable[[str], bool]
    else:
        want = set(event_types).__contains__
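
    # For example (values assumed for illustration): with
    # event_types=['message', 'realm'], want('message') is True and
    # want('pointer') is False, so only the requested sections below run.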

    if want('alert_words'):
        state['alert_words'] = user_alert_words(user_profile)

    if want('custom_profile_fields'):
        fields = custom_profile_fields_for_realm(user_profile.realm.id)
        state['custom_profile_fields'] = [f.as_dict() for f in fields]

    if want('attachments'):
        state['attachments'] = user_attachments(user_profile)

    if want('upload_quota'):
        state['upload_quota'] = user_profile.quota

    if want('total_uploads_size'):
        state['total_uploads_size'] = get_total_uploads_size_for_user(user_profile)

    if want('hotspots'):
        state['hotspots'] = get_next_hotspots(user_profile)

    if want('message'):
        # The client should use get_messages() to fetch messages
        # starting with the max_message_id.  They will get messages
        # newer than that ID via get_events().
        messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
        if messages:
            state['max_message_id'] = messages[0].id
        else:
            state['max_message_id'] = -1

    if want('muted_topics'):
        state['muted_topics'] = get_topic_mutes(user_profile)

    if want('pointer'):
        state['pointer'] = user_profile.pointer

    if want('presence'):
        state['presences'] = get_status_dict(user_profile)

    if want('realm'):
        for property_name in Realm.property_types:
            state['realm_' + property_name] = getattr(user_profile.realm, property_name)

        # Most state is handled via the property_types framework;
        # these manual entries are for those realm settings that don't
        # fit into that framework.
        state['realm_authentication_methods'] = user_profile.realm.authentication_methods_dict()
        state['realm_allow_message_editing'] = user_profile.realm.allow_message_editing
        state['realm_message_content_edit_limit_seconds'] = user_profile.realm.message_content_edit_limit_seconds
        state['realm_icon_url'] = realm_icon_url(user_profile.realm)
        state['realm_icon_source'] = user_profile.realm.icon_source
        state['max_icon_file_size'] = settings.MAX_ICON_FILE_SIZE
        state['realm_bot_domain'] = user_profile.realm.get_bot_domain()
        state['realm_uri'] = user_profile.realm.uri
        state['realm_presence_disabled'] = user_profile.realm.presence_disabled
        state['realm_show_digest_email'] = user_profile.realm.show_digest_email
        state['realm_is_zephyr_mirror_realm'] = user_profile.realm.is_zephyr_mirror_realm
        state['realm_email_auth_enabled'] = email_auth_enabled(user_profile.realm)
        state['realm_password_auth_enabled'] = password_auth_enabled(user_profile.realm)
        if user_profile.realm.notifications_stream and not user_profile.realm.notifications_stream.deactivated:
            notifications_stream = user_profile.realm.notifications_stream
            state['realm_notifications_stream_id'] = notifications_stream.id
        else:
            state['realm_notifications_stream_id'] = -1

    if want('realm_domains'):
        state['realm_domains'] = get_realm_domains(user_profile.realm)

    if want('realm_emoji'):
        state['realm_emoji'] = user_profile.realm.get_emoji()

    if want('realm_filters'):
        state['realm_filters'] = realm_filters_for_realm(user_profile.realm_id)

    if want('realm_user'):
        state['realm_users'] = get_realm_user_dicts(user_profile)
        state['avatar_source'] = user_profile.avatar_source
        state['avatar_url_medium'] = avatar_url(user_profile, medium=True)
        state['avatar_url'] = avatar_url(user_profile)
        state['can_create_streams'] = user_profile.can_create_streams()
        state['cross_realm_bots'] = list(get_cross_realm_dicts())
        state['is_admin'] = user_profile.is_realm_admin
        state['user_id'] = user_profile.id
        state['enter_sends'] = user_profile.enter_sends
        state['email'] = user_profile.email
        state['full_name'] = user_profile.full_name

    if want('realm_bot'):
        state['realm_bots'] = get_owned_bot_dicts(user_profile)

    # This does not yet have an apply_event counterpart, since currently,
    # new entries for EMBEDDED_BOTS can only be added directly in the codebase.
    if want('realm_embedded_bots'):
        state['realm_embedded_bots'] = list(bot.name for bot in EMBEDDED_BOTS)

    if want('subscription'):
        subscriptions, unsubscribed, never_subscribed = gather_subscriptions_helper(
            user_profile, include_subscribers=include_subscribers)
        state['subscriptions'] = subscriptions
        state['unsubscribed'] = unsubscribed
        state['never_subscribed'] = never_subscribed

    if want('update_message_flags') and want('message'):
        # Keeping unread_msgs updated requires both message flag updates and
        # message updates.  This is because new messages will not generate a
        # flag update, so we need to use the flags field in the message event.
        state['raw_unread_msgs'] = get_raw_unread_data(user_profile)

    if want('stream'):
        state['streams'] = do_get_streams(user_profile)
    if want('default_streams'):
        state['realm_default_streams'] = streams_to_dicts_sorted(
            get_default_streams_for_realm(user_profile.realm_id))

    if want('update_display_settings'):
        for prop in UserProfile.property_types:
            state[prop] = getattr(user_profile, prop)
        state['emojiset_choices'] = user_profile.emojiset_choices()
        state['autoscroll_forever'] = user_profile.autoscroll_forever

    if want('update_global_notifications'):
        for notification in UserProfile.notification_setting_types:
            state[notification] = getattr(user_profile, notification)
        state['default_desktop_notifications'] = user_profile.default_desktop_notifications

    if want('zulip_version'):
        state['zulip_version'] = ZULIP_VERSION

    return state
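
# Illustrative call (a sketch; in production, queue_id comes from
# request_event_queue in do_events_register below):
#     state = fetch_initial_state_data(user_profile, ['message'], queue_id)
#     state['max_message_id']  # newest message ID visible to the user, or -1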


def remove_message_id_from_unread_msgs(state, message_id):
    # type: (Dict[str, Dict[str, Any]], int) -> None
    # Drop a newly-read (or deleted) message from every bucket of the
    # intermediate unread-messages data structure.
    raw_unread = state['raw_unread_msgs']

    for key in ['pm_dict', 'stream_dict', 'huddle_dict']:
        raw_unread[key].pop(message_id, None)

    raw_unread['unmuted_stream_msgs'].discard(message_id)
    raw_unread['mentions'].discard(message_id)
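
# For reference, state['raw_unread_msgs'] looks roughly like this (shape
# inferred from the accesses above and in apply_event; keys of the inner
# dicts are message IDs):
#     {'pm_dict': {...}, 'stream_dict': {...}, 'huddle_dict': {...},
#      'unmuted_stream_msgs': set_of_message_ids,
#      'mentions': set_of_message_ids}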


def apply_events(state, events, user_profile, include_subscribers=True,
                 fetch_event_types=None):
    # type: (Dict[str, Any], Iterable[Dict[str, Any]], UserProfile, bool, Optional[Iterable[str]]) -> None
    for event in events:
        if fetch_event_types is not None and event['type'] not in fetch_event_types:
            # TODO: continuing here is not, strictly speaking, correct.
            # In theory, an event of one type, e.g. `realm_user`,
            # could modify state that doesn't come from that
            # `fetch_event_types` value, e.g. the `our_person` part of
            # that code path.  But it should be extremely rare, and
            # fixing it will require a nontrivial refactor of
            # `apply_event`.  For now, be careful in your choice of
            # `fetch_event_types`.
            continue
        apply_event(state, event, user_profile, include_subscribers)
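
# A typical event, as delivered by the Tornado event queue (an illustrative
# sketch; the exact payload depends on the event type):
#     {'type': 'realm_user', 'op': 'update',
#      'person': {'user_id': 10, 'full_name': 'New Name'}}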


def apply_event(state, event, user_profile, include_subscribers):
    # type: (Dict[str, Any], Dict[str, Any], UserProfile, bool) -> None
    if event['type'] == "message":
        state['max_message_id'] = max(state['max_message_id'], event['message']['id'])
        if 'raw_unread_msgs' in state:
            apply_unread_message_event(
                user_profile,
                state['raw_unread_msgs'],
                event['message'],
                event['flags'],
            )
    elif event['type'] == "hotspots":
        state['hotspots'] = event['hotspots']
    elif event['type'] == "custom_profile_fields":
        state['custom_profile_fields'] = event['fields']
    elif event['type'] == "pointer":
        state['pointer'] = max(state['pointer'], event['pointer'])
    elif event['type'] == "realm_user":
        person = event['person']

        def our_person(p):
            # type: (Dict[str, Any]) -> bool
            return p['user_id'] == person['user_id']

        if event['op'] == "add":
            state['realm_users'].append(person)
        elif event['op'] == "remove":
            state['realm_users'] = [user for user in state['realm_users'] if not our_person(user)]
        elif event['op'] == 'update':
            if (person['user_id'] == user_profile.id and
                    'avatar_url' in person and 'avatar_url' in state):
                state['avatar_source'] = person['avatar_source']
                state['avatar_url'] = person['avatar_url']
                state['avatar_url_medium'] = person['avatar_url_medium']

            if 'avatar_source' in person:
                # Drop these so that they don't modify the
                # `realm_user` structure in the `p.update()` line
                # later; they're only used in the lines above.
                del person['avatar_source']
                del person['avatar_url_medium']

            for field in ['is_admin', 'email', 'full_name']:
                if person['user_id'] == user_profile.id and field in person and field in state:
                    state[field] = person[field]

            for p in state['realm_users']:
                if our_person(p):
                    # In the unlikely event that the current user
                    # just changed to/from being an admin, we need
                    # to add/remove the data on all bots in the
                    # realm.  This is ugly and probably better
                    # solved by removing the all-realm-bots data
                    # given to admin users from this flow.
                    if ('is_admin' in person and 'realm_bots' in state and
                            user_profile.email == person['email']):
                        if p['is_admin'] and not person['is_admin']:
                            state['realm_bots'] = []
                        if not p['is_admin'] and person['is_admin']:
                            state['realm_bots'] = get_owned_bot_dicts(user_profile)

                    # Now update the person.
                    p.update(person)
    elif event['type'] == 'realm_bot':
        if event['op'] == 'add':
            state['realm_bots'].append(event['bot'])

        if event['op'] == 'remove':
            email = event['bot']['email']
            for bot in state['realm_bots']:
                if bot['email'] == email:
                    bot['is_active'] = False

        if event['op'] == 'update':
            for bot in state['realm_bots']:
                if bot['email'] == event['bot']['email']:
                    if 'owner_id' in event['bot']:
                        bot['owner'] = get_user_profile_by_id(event['bot']['owner_id']).email
                    else:
                        bot.update(event['bot'])
    elif event['type'] == 'stream':
        if event['op'] == 'create':
            for stream in event['streams']:
                if not stream['invite_only']:
                    stream_data = copy.deepcopy(stream)
                    if include_subscribers:
                        stream_data['subscribers'] = []
                    # Add the stream to never_subscribed (if not invite_only).
                    state['never_subscribed'].append(stream_data)
                state['streams'].append(stream)
            state['streams'].sort(key=lambda elt: elt["name"])

        if event['op'] == 'delete':
            deleted_stream_ids = {stream['stream_id'] for stream in event['streams']}
            state['streams'] = [s for s in state['streams'] if s['stream_id'] not in deleted_stream_ids]
            state['never_subscribed'] = [stream for stream in state['never_subscribed'] if
                                         stream['stream_id'] not in deleted_stream_ids]

        if event['op'] == 'update':
            # For legacy reasons, we call stream data 'subscriptions' in
            # the state var here, for the benefit of the JS code.
            for obj in state['subscriptions']:
                if obj['name'].lower() == event['name'].lower():
                    obj[event['property']] = event['value']
            # Also update the pure streams data.
            for stream in state['streams']:
                if stream['name'].lower() == event['name'].lower():
                    prop = event['property']
                    if prop in stream:
                        stream[prop] = event['value']
        elif event['op'] == "occupy":
            state['streams'] += event['streams']
        elif event['op'] == "vacate":
            stream_ids = [s["stream_id"] for s in event['streams']]
            state['streams'] = [s for s in state['streams'] if s["stream_id"] not in stream_ids]
    elif event['type'] == 'default_streams':
        state['realm_default_streams'] = event['default_streams']
    elif event['type'] == 'realm':
        if event['op'] == "update":
            field = 'realm_' + event['property']
            state[field] = event['value']

            # Tricky interaction: whether we can create streams can get changed here.
            if (field in ['realm_create_stream_by_admins_only',
                          'realm_waiting_period_threshold']) and 'can_create_streams' in state:
                state['can_create_streams'] = user_profile.can_create_streams()
        elif event['op'] == "update_dict":
            for key, value in event['data'].items():
                state['realm_' + key] = value
                # It's a bit messy, but this is where we need to
                # update the state for whether password authentication
                # is enabled on this server.
                if key == 'authentication_methods':
                    state['realm_password_auth_enabled'] = (value['Email'] or value['LDAP'])
                    state['realm_email_auth_enabled'] = value['Email']
    elif event['type'] == "subscription":
        if not include_subscribers and event['op'] in ['peer_add', 'peer_remove']:
            return

        if event['op'] in ["add"]:
            if not include_subscribers:
                # Avoid letting 'subscribers' entries end up in the list.
                for i, sub in enumerate(event['subscriptions']):
                    event['subscriptions'][i] = copy.deepcopy(event['subscriptions'][i])
                    del event['subscriptions'][i]['subscribers']

        def name(sub):
            # type: (Dict[str, Any]) -> Text
            return sub['name'].lower()

        if event['op'] == "add":
            added_names = set(map(name, event["subscriptions"]))
            was_added = lambda s: name(s) in added_names

            # add the new subscriptions
            state['subscriptions'] += event['subscriptions']

            # remove them from unsubscribed if they had been there
            state['unsubscribed'] = [s for s in state['unsubscribed'] if not was_added(s)]

            # remove them from never_subscribed if they had been there
            state['never_subscribed'] = [s for s in state['never_subscribed'] if not was_added(s)]

        elif event['op'] == "remove":
            removed_names = set(map(name, event["subscriptions"]))
            was_removed = lambda s: name(s) in removed_names

            # Find the subs we are affecting.
            removed_subs = list(filter(was_removed, state['subscriptions']))

            # Remove our user from the subscribers of the removed subscriptions.
            if include_subscribers:
                for sub in removed_subs:
                    sub['subscribers'] = [id for id in sub['subscribers'] if id != user_profile.id]

            # We must effectively copy the removed subscriptions from subscriptions to
            # unsubscribed, since we only have the name in our data structure.
            state['unsubscribed'] += removed_subs

            # Now filter out the removed subscriptions from subscriptions.
            state['subscriptions'] = [s for s in state['subscriptions'] if not was_removed(s)]

        elif event['op'] == 'update':
            for sub in state['subscriptions']:
                if sub['name'].lower() == event['name'].lower():
                    sub[event['property']] = event['value']
        elif event['op'] == 'peer_add':
            user_id = event['user_id']
            for sub in state['subscriptions']:
                if (sub['name'] in event['subscriptions'] and
                        user_id not in sub['subscribers']):
                    sub['subscribers'].append(user_id)
            for sub in state['never_subscribed']:
                if (sub['name'] in event['subscriptions'] and
                        user_id not in sub['subscribers']):
                    sub['subscribers'].append(user_id)
        elif event['op'] == 'peer_remove':
            user_id = event['user_id']
            for sub in state['subscriptions']:
                if (sub['name'] in event['subscriptions'] and
                        user_id in sub['subscribers']):
                    sub['subscribers'].remove(user_id)
    elif event['type'] == "presence":
        # TODO: Add user_id to presence update events / state format!
        presence_user_profile = get_user(event['email'], user_profile.realm)
        state['presences'][event['email']] = UserPresence.get_status_dict_by_user(
            presence_user_profile)[event['email']]
    elif event['type'] == "update_message":
        # We don't return messages in /register, so we don't need to
        # do anything for content updates, but we may need to update
        # the unread_msgs data if the topic of an unread message changed.
        if 'subject' in event:
            stream_dict = state['raw_unread_msgs']['stream_dict']
            topic = event['subject']
            for message_id in event['message_ids']:
                if message_id in stream_dict:
                    stream_dict[message_id]['topic'] = topic
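    # (Illustrative topic-edit event for the branch above; the field names
    # match the accesses above, the values are invented:
    #     {'type': 'update_message', 'subject': 'new topic',
    #      'message_ids': [101, 102]})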
    elif event['type'] == "delete_message":
        max_message = Message.objects.filter(
            usermessage__user_profile=user_profile).order_by('-id').first()
        if max_message:
            state['max_message_id'] = max_message.id
        else:
            state['max_message_id'] = -1

        remove_id = event['message_id']
        remove_message_id_from_unread_msgs(state, remove_id)
    elif event['type'] == "reaction":
        # The client will get the message with the reactions directly.
        pass
    elif event['type'] == 'typing':
        # Typing notification events are transient and thus ignored.
        pass
    elif event['type'] == "update_message_flags":
        # We don't return messages in `/register`, so most flags we
        # can ignore, but we do need to update the unread_msgs data if
        # unread state is changed.
        if event['flag'] == 'read' and event['operation'] == 'add':
            for remove_id in event['messages']:
                remove_message_id_from_unread_msgs(state, remove_id)
    elif event['type'] == "realm_domains":
        if event['op'] == 'add':
            state['realm_domains'].append(event['realm_domain'])
        elif event['op'] == 'change':
            for realm_domain in state['realm_domains']:
                if realm_domain['domain'] == event['realm_domain']['domain']:
                    realm_domain['allow_subdomains'] = event['realm_domain']['allow_subdomains']
        elif event['op'] == 'remove':
            state['realm_domains'] = [realm_domain for realm_domain in state['realm_domains']
                                      if realm_domain['domain'] != event['domain']]
    elif event['type'] == "realm_emoji":
        state['realm_emoji'] = event['realm_emoji']
    elif event['type'] == "alert_words":
        state['alert_words'] = event['alert_words']
    elif event['type'] == "muted_topics":
        state['muted_topics'] = event["muted_topics"]
    elif event['type'] == "realm_filters":
        state['realm_filters'] = event["realm_filters"]
    elif event['type'] == "update_display_settings":
        assert event['setting_name'] in UserProfile.property_types
        state[event['setting_name']] = event['setting']
    elif event['type'] == "update_global_notifications":
        assert event['notification_name'] in UserProfile.notification_setting_types
        state[event['notification_name']] = event['setting']
    else:
        raise AssertionError("Unexpected event type %s" % (event['type'],))
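
# Illustrative round trip through apply_event (a sketch; values invented):
#     state = {'pointer': 5}
#     apply_event(state, {'type': 'pointer', 'pointer': 10},
#                 user_profile, include_subscribers=False)
#     # state['pointer'] is now 10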


def do_events_register(user_profile, user_client, apply_markdown=True,
                       event_types=None, queue_lifespan_secs=0, all_public_streams=False,
                       include_subscribers=True, narrow=[], fetch_event_types=None):
    # type: (UserProfile, Client, bool, Optional[Iterable[str]], int, bool, bool, Iterable[Sequence[Text]], Optional[Iterable[str]]) -> Dict[str, Any]

    # Technically we don't need to check this here because
    # build_narrow_filter will check it, but it's nicer from an error
    # handling perspective to do it before contacting Tornado.
    check_supported_events_narrow_filter(narrow)

    # Note that we pass event_types, not fetch_event_types here, since
    # that's what controls which future events are sent.
    queue_id = request_event_queue(user_profile, user_client, apply_markdown,
                                   queue_lifespan_secs, event_types, all_public_streams,
                                   narrow=narrow)

    if queue_id is None:
        raise JsonableError(_("Could not allocate event queue"))

    if fetch_event_types is not None:
        event_types_set = set(fetch_event_types)  # type: Optional[Set[str]]
    elif event_types is not None:
        event_types_set = set(event_types)
    else:
        event_types_set = None

    # Fill up the UserMessage rows if a soft-deactivated user has returned.
    maybe_catch_up_soft_deactivated_user(user_profile)

    ret = fetch_initial_state_data(user_profile, event_types_set, queue_id,
                                   include_subscribers=include_subscribers)

    # Apply events that came in while we were fetching initial data.
    events = get_user_events(user_profile, queue_id, -1)
    apply_events(ret, events, user_profile, include_subscribers=include_subscribers,
                 fetch_event_types=fetch_event_types)

    '''
    NOTE:

    Below is an example of post-processing initial state data AFTER we
    apply events.  For large payloads like `unread_msgs`, it's helpful
    to have an intermediate data structure that is easy to manipulate
    with O(1)-type operations as we apply events.

    Then, only at the end, we put it in the form that's more appropriate
    for clients.
    '''
    if 'raw_unread_msgs' in ret:
        ret['unread_msgs'] = aggregate_unread_data(ret['raw_unread_msgs'])
        del ret['raw_unread_msgs']

    if len(events) > 0:
        ret['last_event_id'] = events[-1]['id']
    else:
        ret['last_event_id'] = -1
    return ret
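
# Illustrative use by a /register-style caller (a sketch; argument values
# assumed): the client stores queue_id and last_event_id, then polls
# get_events for everything newer.
#     ret = do_events_register(user_profile, client,
#                              event_types=['message'],
#                              fetch_event_types=['message', 'update_message_flags'])
#     (ret['queue_id'], ret['last_event_id'])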