# See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
from typing import cast, AbstractSet, Any, Callable, Dict, List, \
    Mapping, MutableMapping, Optional, Iterable, Sequence, Set, Union
from typing_extensions import Deque, TypedDict

from django.utils.translation import ugettext as _
from django.conf import settings
from collections import deque
import os
import time
import logging
import ujson
import requests
import atexit
import sys
import signal
import tornado.ioloop
import random
from zerver.models import UserProfile, Client, Realm
from zerver.decorator import cachify
from zerver.tornado.handlers import clear_handler_by_id, get_handler_by_id, \
    finish_handler, handler_stats_string
from zerver.lib.utils import statsd
from zerver.middleware import async_request_timer_restart
from zerver.lib.message import MessageDict
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.queue import queue_json_publish
from zerver.lib.request import JsonableError
from zerver.tornado.descriptors import clear_descriptor_by_handler_id, set_descriptor_by_handler_id
from zerver.tornado.exceptions import BadEventQueueIdError
from zerver.tornado.sharding import get_tornado_uri, get_tornado_port, \
    notify_tornado_queue_name
from zerver.tornado.autoreload import add_reload_hook
import copy

requests_client = requests.Session()
for host in ['127.0.0.1', 'localhost']:
    if settings.TORNADO_SERVER and host in settings.TORNADO_SERVER:
        # This seems like the only working solution to ignore proxy in
        # requests library.
        requests_client.trust_env = False

# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
DEFAULT_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10

# We garbage-collect every minute; this is totally fine given that the
# GC scan takes ~2ms with 1000 event queues.
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 1

# Capped limit for how long a client can request an event queue
# to live
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60

# The heartbeats effectively act as a server-side timeout for
# get_events().  The actual timeout value is randomized for each
# client connection based on the below value.  We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
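
# Illustrative sketch (not part of the production flow): how a client's
# requested queue lifespan interacts with the constants above inside
# ClientDescriptor.__init__.  The requested values below are made up.
#
#   requested lifespan_secs = 0      -> queue_timeout = DEFAULT_EVENT_QUEUE_TIMEOUT_SECS (600)
#   requested lifespan_secs = 3600   -> queue_timeout = 3600
#   requested lifespan_secs = 10**9  -> queue_timeout = MAX_QUEUE_TIMEOUT_SECS (604800)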

class ClientDescriptor:
    def __init__(self,
                 user_profile_id: int,
                 realm_id: int, event_queue: 'EventQueue',
                 event_types: Optional[Sequence[str]],
                 client_type_name: str,
                 apply_markdown: bool=True,
                 client_gravatar: bool=True,
                 slim_presence: bool=False,
                 all_public_streams: bool=False,
                 lifespan_secs: int=0,
                 narrow: Iterable[Sequence[str]]=[]) -> None:
        # These objects are serialized on shutdown and restored on restart.
        # If fields are added or semantics are changed, temporary code must be
        # added to load_event_queues() to update the restored objects.
        # Additionally, the to_dict and from_dict methods must be updated
        self.user_profile_id = user_profile_id
        self.realm_id = realm_id
        self.current_handler_id = None  # type: Optional[int]
        self.current_client_name = None  # type: Optional[str]
        self.event_queue = event_queue
        self.event_types = event_types
        self.last_connection_time = time.time()
        self.apply_markdown = apply_markdown
        self.client_gravatar = client_gravatar
        self.slim_presence = slim_presence
        self.all_public_streams = all_public_streams
        self.client_type_name = client_type_name
        self._timeout_handle = None  # type: Any  # TODO: should be return type of ioloop.call_later
        self.narrow = narrow
        self.narrow_filter = build_narrow_filter(narrow)

        # Default for lifespan_secs is DEFAULT_EVENT_QUEUE_TIMEOUT_SECS;
        # but users can set it as high as MAX_QUEUE_TIMEOUT_SECS.
        if lifespan_secs == 0:
            lifespan_secs = DEFAULT_EVENT_QUEUE_TIMEOUT_SECS
        self.queue_timeout = min(lifespan_secs, MAX_QUEUE_TIMEOUT_SECS)

    def to_dict(self) -> Dict[str, Any]:
        # If you add a new key to this dict, make sure you add appropriate
        # migration code in from_dict or load_event_queues to account for
        # loading event queues that lack that key.
        return dict(user_profile_id=self.user_profile_id,
                    realm_id=self.realm_id,
                    event_queue=self.event_queue.to_dict(),
                    queue_timeout=self.queue_timeout,
                    event_types=self.event_types,
                    last_connection_time=self.last_connection_time,
                    apply_markdown=self.apply_markdown,
                    client_gravatar=self.client_gravatar,
                    slim_presence=self.slim_presence,
                    all_public_streams=self.all_public_streams,
                    narrow=self.narrow,
                    client_type_name=self.client_type_name)

    def __repr__(self) -> str:
        return "ClientDescriptor<%s>" % (self.event_queue.id,)

    @classmethod
    def from_dict(cls, d: MutableMapping[str, Any]) -> 'ClientDescriptor':
        if 'client_type' in d:
            # Temporary migration for the rename of client_type to client_type_name
            d['client_type_name'] = d['client_type']
        if 'client_gravatar' not in d:
            # Temporary migration for the addition of the client_gravatar field
            d['client_gravatar'] = False

        if 'slim_presence' not in d:
            d['slim_presence'] = False

        ret = cls(
            d['user_profile_id'],
            d['realm_id'],
            EventQueue.from_dict(d['event_queue']),
            d['event_types'],
            d['client_type_name'],
            d['apply_markdown'],
            d['client_gravatar'],
            d['slim_presence'],
            d['all_public_streams'],
            d['queue_timeout'],
            d.get('narrow', [])
        )
        ret.last_connection_time = d['last_connection_time']
        return ret
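
    # Illustrative sketch (not part of the production flow): the to_dict /
    # from_dict pair is what dump_event_queues / load_event_queues rely on to
    # carry queues across a Tornado restart.  The ids below are made up.
    #
    #   descriptor = ClientDescriptor(user_profile_id=1, realm_id=1,
    #                                 event_queue=EventQueue('1580000000:0'),
    #                                 event_types=None, client_type_name='website')
    #   restored = ClientDescriptor.from_dict(descriptor.to_dict())
    #   assert restored.user_profile_id == descriptor.user_profile_id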

    def prepare_for_pickling(self) -> None:
        self.current_handler_id = None
        self._timeout_handle = None

    def add_event(self, event: Mapping[str, Any]) -> None:
        if self.current_handler_id is not None:
            handler = get_handler_by_id(self.current_handler_id)
            async_request_timer_restart(handler._request)

        self.event_queue.push(event)
        self.finish_current_handler()

    def finish_current_handler(self) -> bool:
        if self.current_handler_id is not None:
            err_msg = "Got error finishing handler for queue %s" % (self.event_queue.id,)
            try:
                finish_handler(self.current_handler_id, self.event_queue.id,
                               self.event_queue.contents(), self.apply_markdown)
            except Exception:
                logging.exception(err_msg)
            finally:
                self.disconnect_handler()
            return True
        return False

    def accepts_event(self, event: Mapping[str, Any]) -> bool:
        if self.event_types is not None and event["type"] not in self.event_types:
            return False
        if event["type"] == "message":
            return self.narrow_filter(event)
        return True

    # TODO: Refactor so we don't need this function
    def accepts_messages(self) -> bool:
        return self.event_types is None or "message" in self.event_types
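
    # Illustrative sketch (not part of the production flow): how event_types
    # and narrow interact in accepts_event().  A queue registered with
    # narrow=[['stream', 'Denmark']] only accepts message events that its
    # narrow filter matches; non-message events are filtered solely by
    # event_types.  The descriptor below uses made-up ids.
    #
    #   d = ClientDescriptor(1, 1, EventQueue('1580000000:0'), ['message'],
    #                        'website', narrow=[['stream', 'Denmark']])
    #   d.accepts_event(dict(type='heartbeat'))  # False: not in event_types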

    def expired(self, now: float) -> bool:
        return (self.current_handler_id is None and
                now - self.last_connection_time >= self.queue_timeout)

    def connect_handler(self, handler_id: int, client_name: str) -> None:
        self.current_handler_id = handler_id
        self.current_client_name = client_name
        set_descriptor_by_handler_id(handler_id, self)
        self.last_connection_time = time.time()

        def timeout_callback() -> None:
            self._timeout_handle = None
            # All clients get heartbeat events
            self.add_event(dict(type='heartbeat'))
        ioloop = tornado.ioloop.IOLoop.instance()
        interval = HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10)
        if self.client_type_name != 'API: heartbeat test':
            self._timeout_handle = ioloop.call_later(interval, timeout_callback)

    def disconnect_handler(self, client_closed: bool=False) -> None:
        if self.current_handler_id:
            clear_descriptor_by_handler_id(self.current_handler_id, None)
            clear_handler_by_id(self.current_handler_id)
            if client_closed:
                logging.info("Client disconnected for queue %s (%s via %s)" %
                             (self.event_queue.id, self.user_profile_id,
                              self.current_client_name))
        self.current_handler_id = None
        self.current_client_name = None
        if self._timeout_handle is not None:
            ioloop = tornado.ioloop.IOLoop.instance()
            ioloop.remove_timeout(self._timeout_handle)
            self._timeout_handle = None

    def cleanup(self) -> None:
        # Before we can GC the event queue, we need to disconnect the
        # handler and notify the client (or connection server) so that
        # they can cleanup their own state related to the GC'd event
        # queue.  Finishing the handler before we GC ensures the
        # invariant that event queues are idle when passed to
        # `do_gc_event_queues` is preserved.
        self.finish_current_handler()
        do_gc_event_queues({self.event_queue.id}, {self.user_profile_id},
                           {self.realm_id})

def compute_full_event_type(event: Mapping[str, Any]) -> str:
    if event["type"] == "update_message_flags":
        if event["all"]:
            # Put the "all" case in its own category
            return "all_flags/%s/%s" % (event["flag"], event["operation"])
        return "flags/%s/%s" % (event["operation"], event["flag"])
    return event["type"]
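
# Illustrative sketch (not part of the production flow): the "full" event type
# folds the operation and flag of update_message_flags events into the type
# string, which is what lets EventQueue.push collapse them into virtual events.
#
#   compute_full_event_type(dict(type='update_message_flags', all=False,
#                                operation='add', flag='read'))
#   # -> 'flags/add/read'
#   compute_full_event_type(dict(type='heartbeat'))  # -> 'heartbeat'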

class EventQueue:
    def __init__(self, id: str) -> None:
        # When extending this list of properties, one must be sure to
        # update to_dict and from_dict.

        self.queue = deque()  # type: Deque[Dict[str, Any]]
        self.next_event_id = 0  # type: int
        self.newest_pruned_id = -1  # type: Optional[int]  # will only be None for migration from old versions
        self.id = id  # type: str
        self.virtual_events = {}  # type: Dict[str, Dict[str, Any]]

    def to_dict(self) -> Dict[str, Any]:
        # If you add a new key to this dict, make sure you add appropriate
        # migration code in from_dict or load_event_queues to account for
        # loading event queues that lack that key.
        d = dict(
            id=self.id,
            next_event_id=self.next_event_id,
            queue=list(self.queue),
            virtual_events=self.virtual_events,
        )
        if self.newest_pruned_id is not None:
            d['newest_pruned_id'] = self.newest_pruned_id
        return d

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> 'EventQueue':
        ret = cls(d['id'])
        ret.next_event_id = d['next_event_id']
        ret.newest_pruned_id = d.get('newest_pruned_id', None)
        ret.queue = deque(d['queue'])
        ret.virtual_events = d.get("virtual_events", {})
        return ret

    def push(self, orig_event: Mapping[str, Any]) -> None:
        # By default, we make a shallow copy of the event dictionary
        # to push into the target event queue; this allows the calling
        # code to send the same "event" object to multiple queues.
        # This behavior is important because the event_queue system is
        # about to mutate the event dictionary, minimally to add the
        # event_id attribute.
        event = dict(orig_event)
        event['id'] = self.next_event_id
        self.next_event_id += 1
        full_event_type = compute_full_event_type(event)
        if (full_event_type in ["pointer", "restart"] or
                full_event_type.startswith("flags/")):
            if full_event_type not in self.virtual_events:
                self.virtual_events[full_event_type] = copy.deepcopy(event)
                return
            # Update the virtual event with the values from the event
            virtual_event = self.virtual_events[full_event_type]
            virtual_event["id"] = event["id"]
            if "timestamp" in event:
                virtual_event["timestamp"] = event["timestamp"]
            if full_event_type == "pointer":
                virtual_event["pointer"] = event["pointer"]
            elif full_event_type == "restart":
                virtual_event["server_generation"] = event["server_generation"]
            elif full_event_type.startswith("flags/"):
                virtual_event["messages"] += event["messages"]
        else:
            self.queue.append(event)

    # Note that pop ignores virtual events.  This is fine in our
    # current usage since virtual events should always be resolved to
    # a real event before being given to users.
    def pop(self) -> Dict[str, Any]:
        return self.queue.popleft()

    def empty(self) -> bool:
        return len(self.queue) == 0 and len(self.virtual_events) == 0

    # See the comment on pop; that applies here as well
    def prune(self, through_id: int) -> None:
        while len(self.queue) != 0 and self.queue[0]['id'] <= through_id:
            self.newest_pruned_id = self.queue[0]['id']
            self.pop()

    def contents(self) -> List[Dict[str, Any]]:
        contents = []  # type: List[Dict[str, Any]]
        virtual_id_map = {}  # type: Dict[str, Dict[str, Any]]
        for event_type in self.virtual_events:
            virtual_id_map[self.virtual_events[event_type]["id"]] = self.virtual_events[event_type]
        virtual_ids = sorted(list(virtual_id_map.keys()))

        # Merge the virtual events into their final place in the queue
        index = 0
        length = len(virtual_ids)
        for event in self.queue:
            while index < length and virtual_ids[index] < event["id"]:
                contents.append(virtual_id_map[virtual_ids[index]])
                index += 1
            contents.append(event)
        while index < length:
            contents.append(virtual_id_map[virtual_ids[index]])
            index += 1

        self.virtual_events = {}
        self.queue = deque(contents)
        return contents
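
# Illustrative sketch (not part of the production flow): successive flag
# events collapse into a single virtual event until contents() is called,
# which merges them back into the queue in id order.  The queue id and
# message ids below are made up.
#
#   q = EventQueue('1580000000:0')
#   q.push(dict(type='update_message_flags', all=False,
#               operation='add', flag='read', messages=[1]))
#   q.push(dict(type='update_message_flags', all=False,
#               operation='add', flag='read', messages=[2]))
#   q.contents()  # a single merged event whose messages == [1, 2]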

# maps queue ids to client descriptors
clients = {}  # type: Dict[str, ClientDescriptor]
# maps user id to list of client descriptors
user_clients = {}  # type: Dict[int, List[ClientDescriptor]]
# maps realm id to list of client descriptors with all_public_streams=True
realm_clients_all_streams = {}  # type: Dict[int, List[ClientDescriptor]]

# list of registered gc hooks.
# each one will be called with a user profile id, queue, and bool
# last_for_client that is true if this is the last queue pertaining
# to this user_profile_id that is about to be deleted
gc_hooks = []  # type: List[Callable[[int, ClientDescriptor, bool], None]]

next_queue_id = 0

def clear_client_event_queues_for_testing() -> None:
    assert(settings.TEST_SUITE)
    clients.clear()
    user_clients.clear()
    realm_clients_all_streams.clear()
    gc_hooks.clear()
    global next_queue_id
    next_queue_id = 0

def add_client_gc_hook(hook: Callable[[int, ClientDescriptor, bool], None]) -> None:
    gc_hooks.append(hook)

def get_client_descriptor(queue_id: str) -> ClientDescriptor:
    return clients.get(queue_id)

def get_client_descriptors_for_user(user_profile_id: int) -> List[ClientDescriptor]:
    return user_clients.get(user_profile_id, [])

def get_client_descriptors_for_realm_all_streams(realm_id: int) -> List[ClientDescriptor]:
    return realm_clients_all_streams.get(realm_id, [])

def add_to_client_dicts(client: ClientDescriptor) -> None:
    user_clients.setdefault(client.user_profile_id, []).append(client)
    if client.all_public_streams or client.narrow != []:
        realm_clients_all_streams.setdefault(client.realm_id, []).append(client)

def allocate_client_descriptor(new_queue_data: MutableMapping[str, Any]) -> ClientDescriptor:
    global next_queue_id
    queue_id = str(settings.SERVER_GENERATION) + ':' + str(next_queue_id)
    next_queue_id += 1
    new_queue_data["event_queue"] = EventQueue(queue_id).to_dict()
    client = ClientDescriptor.from_dict(new_queue_data)
    clients[queue_id] = client
    add_to_client_dicts(client)
    return client

def do_gc_event_queues(to_remove: AbstractSet[str], affected_users: AbstractSet[int],
                       affected_realms: AbstractSet[int]) -> None:
    def filter_client_dict(client_dict: MutableMapping[int, List[ClientDescriptor]], key: int) -> None:
        if key not in client_dict:
            return

        new_client_list = [c for c in client_dict[key] if c.event_queue.id not in to_remove]
        if len(new_client_list) == 0:
            del client_dict[key]
        else:
            client_dict[key] = new_client_list

    for user_id in affected_users:
        filter_client_dict(user_clients, user_id)

    for realm_id in affected_realms:
        filter_client_dict(realm_clients_all_streams, realm_id)

    for id in to_remove:
        for cb in gc_hooks:
            cb(clients[id].user_profile_id, clients[id], clients[id].user_profile_id not in user_clients)
        del clients[id]

def gc_event_queues(port: int) -> None:
    start = time.time()
    to_remove = set()  # type: Set[str]
    affected_users = set()  # type: Set[int]
    affected_realms = set()  # type: Set[int]
    for (id, client) in clients.items():
        if client.expired(start):
            to_remove.add(id)
            affected_users.add(client.user_profile_id)
            affected_realms.add(client.realm_id)

    # We don't need to call e.g. finish_current_handler on the clients
    # being removed because they are guaranteed to be idle (because
    # they are expired) and thus not have a current handler.
    do_gc_event_queues(to_remove, affected_users, affected_realms)

    if settings.PRODUCTION:
        logging.info(('Tornado %d removed %d expired event queues owned by %d users in %.3fs.' +
                      ' Now %d active queues, %s')
                     % (port, len(to_remove), len(affected_users), time.time() - start,
                        len(clients), handler_stats_string()))
    statsd.gauge('tornado.active_queues', len(clients))
    statsd.gauge('tornado.active_users', len(user_clients))

def persistent_queue_filename(port: int, last: bool=False) -> str:
    if settings.TORNADO_PROCESSES == 1:
        # Use non-port-aware, legacy version.
        if last:
            return settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % ('',) + '.last'
        return settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % ('',)
    if last:
        return settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % ('.' + str(port) + '.last',)
    return settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % ('.' + str(port),)
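
# Illustrative sketch (not part of the production flow): assuming a
# JSON_PERSISTENT_QUEUE_FILENAME_PATTERN of '/srv/zulip/event_queues%s.json'
# (a hypothetical settings value), the filenames produced are:
#
#   TORNADO_PROCESSES > 1:  persistent_queue_filename(9800)             -> '/srv/zulip/event_queues.9800.json'
#   TORNADO_PROCESSES > 1:  persistent_queue_filename(9800, last=True)  -> '/srv/zulip/event_queues.9800.last.json'
#   TORNADO_PROCESSES == 1: persistent_queue_filename(9800)             -> '/srv/zulip/event_queues.json'
#   TORNADO_PROCESSES == 1: persistent_queue_filename(9800, last=True)  -> '/srv/zulip/event_queues.json.last'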

def dump_event_queues(port: int) -> None:
    start = time.time()

    with open(persistent_queue_filename(port), "w") as stored_queues:
        ujson.dump([(qid, client.to_dict()) for (qid, client) in clients.items()],
                   stored_queues)

    logging.info('Tornado %d dumped %d event queues in %.3fs'
                 % (port, len(clients), time.time() - start))

def load_event_queues(port: int) -> None:
    global clients
    start = time.time()

    # ujson chokes on bad input pretty easily.  We separate out the actual
    # file reading from the loading so that we don't silently fail if we get
    # bad input.
    try:
        with open(persistent_queue_filename(port), "r") as stored_queues:
            json_data = stored_queues.read()
        try:
            clients = dict((qid, ClientDescriptor.from_dict(client))
                           for (qid, client) in ujson.loads(json_data))
        except Exception:
            logging.exception("Tornado %d could not deserialize event queues" % (port,))
    except (IOError, EOFError):
        pass

    for client in clients.values():
        # Put code for migrations due to event queue data format changes here

        add_to_client_dicts(client)

    logging.info('Tornado %d loaded %d event queues in %.3fs'
                 % (port, len(clients), time.time() - start))

def send_restart_events(immediate: bool=False) -> None:
    event = dict(type='restart', server_generation=settings.SERVER_GENERATION)  # type: Dict[str, Any]
    if immediate:
        event['immediate'] = True
    for client in clients.values():
        if client.accepts_event(event):
            client.add_event(event)

def setup_event_queue(port: int) -> None:
    if not settings.TEST_SUITE:
        load_event_queues(port)
        atexit.register(dump_event_queues, port)
        # Make sure we dump event queues even if we exit via signal
        signal.signal(signal.SIGTERM, lambda signum, stack: sys.exit(1))
        add_reload_hook(lambda: dump_event_queues(port))

    try:
        os.rename(persistent_queue_filename(port), persistent_queue_filename(port, last=True))
    except OSError:
        pass

    # Set up event queue garbage collection
    ioloop = tornado.ioloop.IOLoop.instance()
    pc = tornado.ioloop.PeriodicCallback(lambda: gc_event_queues(port),
                                         EVENT_QUEUE_GC_FREQ_MSECS, ioloop)
    pc.start()

    send_restart_events(immediate=settings.DEVELOPMENT)

def fetch_events(query: Mapping[str, Any]) -> Dict[str, Any]:
    queue_id = query["queue_id"]  # type: str
    dont_block = query["dont_block"]  # type: bool
    last_event_id = query["last_event_id"]  # type: int
    user_profile_id = query["user_profile_id"]  # type: int
    new_queue_data = query.get("new_queue_data")  # type: Optional[MutableMapping[str, Any]]
    client_type_name = query["client_type_name"]  # type: str
    handler_id = query["handler_id"]  # type: int

    try:
        was_connected = False
        orig_queue_id = queue_id
        extra_log_data = ""
        if queue_id is None:
            if dont_block:
                client = allocate_client_descriptor(new_queue_data)
                queue_id = client.event_queue.id
            else:
                raise JsonableError(_("Missing 'queue_id' argument"))
        else:
            if last_event_id is None:
                raise JsonableError(_("Missing 'last_event_id' argument"))
            client = get_client_descriptor(queue_id)
            if client is None:
                raise BadEventQueueIdError(queue_id)
            if user_profile_id != client.user_profile_id:
                raise JsonableError(_("You are not authorized to get events from this queue"))
            if (
                client.event_queue.newest_pruned_id is not None
                and last_event_id < client.event_queue.newest_pruned_id
            ):
                raise JsonableError(_("An event newer than %s has already been pruned!") % (last_event_id,))
            client.event_queue.prune(last_event_id)
            if (
                client.event_queue.newest_pruned_id is not None
                and last_event_id != client.event_queue.newest_pruned_id
            ):
                raise JsonableError(_("Event %s was not in this queue") % (last_event_id,))
            was_connected = client.finish_current_handler()

        if not client.event_queue.empty() or dont_block:
            response = dict(events=client.event_queue.contents(),
                            handler_id=handler_id)  # type: Dict[str, Any]
            if orig_queue_id is None:
                response['queue_id'] = queue_id
            if len(response["events"]) == 1:
                extra_log_data = "[%s/%s/%s]" % (queue_id, len(response["events"]),
                                                 response["events"][0]["type"])
            else:
                extra_log_data = "[%s/%s]" % (queue_id, len(response["events"]))
            if was_connected:
                extra_log_data += " [was connected]"
            return dict(type="response", response=response, extra_log_data=extra_log_data)

        # After this point, dont_block=False, the queue is empty, and we
        # have a pre-existing queue, so we wait for new events.
        if was_connected:
            logging.info("Disconnected handler for queue %s (%s/%s)" % (queue_id, user_profile_id,
                                                                        client_type_name))
    except JsonableError as e:
        return dict(type="error", exception=e)

    client.connect_handler(handler_id, client_type_name)
    return dict(type="async")
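
# Illustrative sketch (not part of the production flow): the shape of the
# `query` mapping that the Tornado request layer passes into fetch_events when
# a client polls an existing queue.  All values below are made up.
#
#   fetch_events(dict(queue_id='1580000000:0', dont_block=False,
#                     last_event_id=42, user_profile_id=1,
#                     new_queue_data=None, client_type_name='website',
#                     handler_id=17))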

# The following functions are called from Django

def request_event_queue(user_profile: UserProfile, user_client: Client, apply_markdown: bool,
                        client_gravatar: bool, slim_presence: bool, queue_lifespan_secs: int,
                        event_types: Optional[Iterable[str]]=None,
                        all_public_streams: bool=False,
                        narrow: Iterable[Sequence[str]]=[]) -> Optional[str]:
    if settings.TORNADO_SERVER:
        tornado_uri = get_tornado_uri(user_profile.realm)
        req = {'dont_block': 'true',
               'apply_markdown': ujson.dumps(apply_markdown),
               'client_gravatar': ujson.dumps(client_gravatar),
               'slim_presence': ujson.dumps(slim_presence),
               'all_public_streams': ujson.dumps(all_public_streams),
               'client': 'internal',
               'user_profile_id': user_profile.id,
               'user_client': user_client.name,
               'narrow': ujson.dumps(narrow),
               'secret': settings.SHARED_SECRET,
               'lifespan_secs': queue_lifespan_secs}
        if event_types is not None:
            req['event_types'] = ujson.dumps(event_types)

        try:
            resp = requests_client.post(tornado_uri + '/api/v1/events/internal',
                                        data=req)
        except requests.adapters.ConnectionError:
            logging.error('Tornado server does not seem to be running, check %s '
                          'and %s for more information.' %
                          (settings.ERROR_FILE_LOG_PATH, "tornado.log"))
            raise requests.adapters.ConnectionError(
                "Django cannot connect to Tornado server (%s); try restarting" %
                (tornado_uri,))

        resp.raise_for_status()

        return resp.json()['queue_id']

    return None

def get_user_events(user_profile: UserProfile, queue_id: str, last_event_id: int) -> List[Dict[str, Any]]:
    if settings.TORNADO_SERVER:
        tornado_uri = get_tornado_uri(user_profile.realm)
        post_data = {
            'queue_id': queue_id,
            'last_event_id': last_event_id,
            'dont_block': 'true',
            'user_profile_id': user_profile.id,
            'secret': settings.SHARED_SECRET,
            'client': 'internal'
        }  # type: Dict[str, Any]
        resp = requests_client.post(tornado_uri + '/api/v1/events/internal',
                                    data=post_data)
        resp.raise_for_status()

        return resp.json()['events']
    return []

# Send email notifications to idle users
# after they are idle for 1 hour
NOTIFY_AFTER_IDLE_HOURS = 1

def build_offline_notification(user_profile_id: int, message_id: int) -> Dict[str, Any]:
    return {"user_profile_id": user_profile_id,
            "message_id": message_id,
            "type": "add",
            "timestamp": time.time()}
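
# Illustrative sketch (not part of the production flow): the queue item that
# maybe_enqueue_notifications builds on top of this dict before publishing it
# to the notification workers.  The ids below are made up.
#
#   notice = build_offline_notification(user_profile_id=1, message_id=42)
#   # {'user_profile_id': 1, 'message_id': 42, 'type': 'add', 'timestamp': <time.time()>}
#   # maybe_enqueue_notifications then adds 'trigger' and 'stream_name'.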

def missedmessage_hook(user_profile_id: int, client: ClientDescriptor, last_for_client: bool) -> None:
    """The receiver_is_off_zulip logic used to determine whether a user
    has no active client suffers from a somewhat fundamental race
    condition.  If the client is no longer on the Internet,
    receiver_is_off_zulip will still return true for
    DEFAULT_EVENT_QUEUE_TIMEOUT_SECS, until the queue is
    garbage-collected.  This would cause us to reliably miss
    push/email notifying users for messages arriving during the
    DEFAULT_EVENT_QUEUE_TIMEOUT_SECS after they suspend their laptop (for
    example).  We address this by, when the queue is garbage-collected
    at the end of those 10 minutes, checking to see if it's the last
    one, and if so, potentially triggering notifications to the user
    at that time, resulting in at most a DEFAULT_EVENT_QUEUE_TIMEOUT_SECS
    delay in the arrival of their notifications.

    As Zulip's APIs get more popular and the mobile apps start using
    long-lived event queues for perf optimization, future versions of
    this will likely need to replace checking `last_for_client` with
    something more complicated, so that we only consider clients like
    web browsers, not the mobile apps or random API scripts.
    """
    # Only process missedmessage hook when the last queue for a
    # client has been garbage collected
    if not last_for_client:
        return

    for event in client.event_queue.contents():
        if event['type'] != 'message':
            continue
        assert 'flags' in event

        flags = event.get('flags')

        mentioned = 'mentioned' in flags and 'read' not in flags
        private_message = event['message']['type'] == 'private'
        # stream_push_notify is set in process_message_event.
        stream_push_notify = event.get('stream_push_notify', False)
        stream_email_notify = event.get('stream_email_notify', False)
        wildcard_mention_notify = (event.get('wildcard_mention_notify', False) and
                                   'read' not in flags and 'wildcard_mentioned' in flags)

        stream_name = None
        if not private_message:
            stream_name = event['message']['display_recipient']

        # Since one is by definition idle, we don't need to check always_push_notify
        always_push_notify = False
        # Since we just GC'd the last event queue, the user is definitely idle.
        idle = True

        message_id = event['message']['id']
        # Pass on the information on whether a push or email notification was already sent.
        already_notified = dict(
            push_notified = event.get("push_notified", False),
            email_notified = event.get("email_notified", False),
        )
        maybe_enqueue_notifications(user_profile_id, message_id, private_message, mentioned,
                                    wildcard_mention_notify, stream_push_notify,
                                    stream_email_notify, stream_name,
                                    always_push_notify, idle, already_notified)

def receiver_is_off_zulip(user_profile_id: int) -> bool:
    # If a user has no message-receiving event queues, they've got no open zulip
    # session so we notify them
    all_client_descriptors = get_client_descriptors_for_user(user_profile_id)
    message_event_queues = [client for client in all_client_descriptors if client.accepts_messages()]
    off_zulip = len(message_event_queues) == 0
    return off_zulip

def maybe_enqueue_notifications(user_profile_id: int, message_id: int, private_message: bool,
                                mentioned: bool,
                                wildcard_mention_notify: bool,
                                stream_push_notify: bool,
                                stream_email_notify: bool, stream_name: Optional[str],
                                always_push_notify: bool, idle: bool,
                                already_notified: Dict[str, bool]) -> Dict[str, bool]:
    """This function has a complete unit test suite in
    `test_enqueue_notifications` that should be expanded as we add
    more features here."""
    notified = dict()  # type: Dict[str, bool]

    if (idle or always_push_notify) and (private_message or mentioned or
                                         wildcard_mention_notify or stream_push_notify):
        notice = build_offline_notification(user_profile_id, message_id)
        if private_message:
            notice['trigger'] = 'private_message'
        elif mentioned:
            notice['trigger'] = 'mentioned'
        elif wildcard_mention_notify:
            notice['trigger'] = 'wildcard_mentioned'
        elif stream_push_notify:
            notice['trigger'] = 'stream_push_notify'
        else:
            raise AssertionError("Unknown notification trigger!")
        notice['stream_name'] = stream_name
        if not already_notified.get("push_notified"):
            queue_json_publish("missedmessage_mobile_notifications", notice)
            notified['push_notified'] = True

    # Send missed_message emails if a private message or a
    # mention.  Eventually, we'll add settings to allow email
    # notifications to match the model of push notifications
    # above.
    if idle and (private_message or mentioned or wildcard_mention_notify or stream_email_notify):
        notice = build_offline_notification(user_profile_id, message_id)
        if private_message:
            notice['trigger'] = 'private_message'
        elif mentioned:
            notice['trigger'] = 'mentioned'
        elif wildcard_mention_notify:
            notice['trigger'] = 'wildcard_mentioned'
        elif stream_email_notify:
            notice['trigger'] = 'stream_email_notify'
        else:
            raise AssertionError("Unknown notification trigger!")
        notice['stream_name'] = stream_name
        if not already_notified.get("email_notified"):
            queue_json_publish("missedmessage_emails", notice, lambda notice: None)
            notified['email_notified'] = True

    return notified

ClientInfo = TypedDict('ClientInfo', {
    'client': ClientDescriptor,
    'flags': Optional[Iterable[str]],
    'is_sender': bool,
})

def get_client_info_for_message_event(event_template: Mapping[str, Any],
                                      users: Iterable[Mapping[str, Any]]) -> Dict[str, ClientInfo]:

    '''
    Return client info for all the clients interested in a message.
    This basically includes clients for users who are recipients
    of the message, with some nuances for bots that auto-subscribe
    to all streams, plus users who may be mentioned, etc.
    '''

    send_to_clients = {}  # type: Dict[str, ClientInfo]

    sender_queue_id = event_template.get('sender_queue_id', None)  # type: Optional[str]

    def is_sender_client(client: ClientDescriptor) -> bool:
        return (sender_queue_id is not None) and client.event_queue.id == sender_queue_id

    # If we're on a public stream, look for clients (typically belonging to
    # bots) that are registered to get events for ALL streams.
    if 'stream_name' in event_template and not event_template.get("invite_only"):
        realm_id = event_template['realm_id']
        for client in get_client_descriptors_for_realm_all_streams(realm_id):
            send_to_clients[client.event_queue.id] = dict(
                client=client,
                flags=[],
                is_sender=is_sender_client(client)
            )

    for user_data in users:
        user_profile_id = user_data['id']  # type: int
        flags = user_data.get('flags', [])  # type: Iterable[str]

        for client in get_client_descriptors_for_user(user_profile_id):
            send_to_clients[client.event_queue.id] = dict(
                client=client,
                flags=flags,
                is_sender=is_sender_client(client)
            )

    return send_to_clients

def process_message_event(event_template: Mapping[str, Any], users: Iterable[Mapping[str, Any]]) -> None:
    """See
    https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
    for high-level documentation on this subsystem.
    """
    send_to_clients = get_client_info_for_message_event(event_template, users)

    presence_idle_user_ids = set(event_template.get('presence_idle_user_ids', []))
    wide_dict = event_template['message_dict']  # type: Dict[str, Any]

    sender_id = wide_dict['sender_id']  # type: int
    message_id = wide_dict['id']  # type: int
    message_type = wide_dict['type']  # type: str
    sending_client = wide_dict['client']  # type: str

    @cachify
    def get_client_payload(apply_markdown: bool, client_gravatar: bool) -> Dict[str, Any]:
        dct = copy.deepcopy(wide_dict)

        # Temporary transitional code: Zulip servers that have message
        # events in their event queues and upgrade to the new version
        # that expects sender_delivery_email in these events will
        # throw errors processing events.  We can remove this block
        # once we don't expect anyone to be directly upgrading from
        # 2.0.x to the latest Zulip.
        if 'sender_delivery_email' not in dct:  # nocoverage
            dct['sender_delivery_email'] = dct['sender_email']

        MessageDict.finalize_payload(dct, apply_markdown, client_gravatar)
        return dct

    # Extra user-specific data to include
    extra_user_data = {}  # type: Dict[int, Any]

    for user_data in users:
        user_profile_id = user_data['id']  # type: int
        flags = user_data.get('flags', [])  # type: Iterable[str]

        # If the recipient was offline and the message was a single or group PM to them
        # or they were @-notified potentially notify more immediately
        private_message = message_type == "private" and user_profile_id != sender_id
        mentioned = 'mentioned' in flags and 'read' not in flags
        stream_push_notify = user_data.get('stream_push_notify', False)
        stream_email_notify = user_data.get('stream_email_notify', False)
        wildcard_mention_notify = (user_data.get('wildcard_mention_notify', False) and
                                   'wildcard_mentioned' in flags and 'read' not in flags)

        # We first check if a message is potentially mentionable,
        # since receiver_is_off_zulip is somewhat expensive.
        if (private_message or mentioned or wildcard_mention_notify
                or stream_push_notify or stream_email_notify):
            idle = receiver_is_off_zulip(user_profile_id) or (user_profile_id in presence_idle_user_ids)
            always_push_notify = user_data.get('always_push_notify', False)
            stream_name = event_template.get('stream_name')
            result = maybe_enqueue_notifications(user_profile_id, message_id, private_message,
                                                 mentioned,
                                                 wildcard_mention_notify,
                                                 stream_push_notify, stream_email_notify,
                                                 stream_name, always_push_notify, idle, {})
            result['stream_push_notify'] = stream_push_notify
            result['stream_email_notify'] = stream_email_notify
            result['wildcard_mention_notify'] = wildcard_mention_notify
            extra_user_data[user_profile_id] = result
|
2014-04-24 02:16:53 +02:00
|
|
|
|
2017-09-27 10:09:12 +02:00
|
|
|
for client_data in send_to_clients.values():
|
2014-04-24 02:16:53 +02:00
|
|
|
client = client_data['client']
|
|
|
|
flags = client_data['flags']
|
2017-06-04 12:15:23 +02:00
|
|
|
is_sender = client_data.get('is_sender', False) # type: bool
|
|
|
|
extra_data = extra_user_data.get(client.user_profile_id, None) # type: Optional[Mapping[str, bool]]
|
2014-04-24 02:16:53 +02:00
|
|
|
|
|
|
|
if not client.accepts_messages():
|
|
|
|
# The actual check is the accepts_event() check below;
|
|
|
|
# this line is just an optimization to avoid copying
|
|
|
|
# message data unnecessarily
|
|
|
|
continue
|
|
|
|
|
2017-10-31 18:36:18 +01:00
|
|
|
message_dict = get_client_payload(client.apply_markdown, client.client_gravatar)
|
2014-04-24 02:16:53 +02:00
|
|
|
|
|
|
|
# Make sure Zephyr mirroring bots know whether stream is invite-only
|
2014-01-28 18:13:41 +01:00
|
|
|
if "mirror" in client.client_type_name and event_template.get("invite_only"):
|
2014-04-24 02:16:53 +02:00
|
|
|
message_dict = message_dict.copy()
|
|
|
|
message_dict["invite_only_stream"] = True
|
|
|
|
|
2017-06-04 12:15:23 +02:00
|
|
|
user_event = dict(type='message', message=message_dict, flags=flags) # type: Dict[str, Any]
|
2014-04-24 02:16:53 +02:00
|
|
|
if extra_data is not None:
|
|
|
|
user_event.update(extra_data)
|
|
|
|
|
|
|
|
if is_sender:
|
2017-07-14 19:30:23 +02:00
|
|
|
local_message_id = event_template.get('local_id', None)
|
|
|
|
if local_message_id is not None:
|
|
|
|
user_event["local_message_id"] = local_message_id
|
2014-04-24 02:16:53 +02:00
|
|
|
|
|
|
|
if not client.accepts_event(user_event):
|
|
|
|
continue
|
|
|
|
|
|
|
|
# The below prevents (Zephyr) mirroring loops.
|
|
|
|
if ('mirror' in sending_client and
|
2016-12-03 18:19:09 +01:00
|
|
|
sending_client.lower() == client.client_type_name.lower()):
|
2014-04-24 02:16:53 +02:00
|
|
|
continue
|
2019-08-06 22:34:52 +02:00
|
|
|
|
2014-04-24 02:16:53 +02:00
|
|
|
client.add_event(user_event)

def process_presence_event(event: Mapping[str, Any], users: Iterable[int]) -> None:
    if 'user_id' not in event:
        # We only recently added `user_id` to presence data.
        # Any old events in our queue can just be dropped,
        # since presence events are pretty ephemeral in nature.
        logging.warning('Dropping some obsolete presence events after upgrade.')
        return

    slim_event = dict(
        type='presence',
        user_id=event['user_id'],
        server_timestamp=event['server_timestamp'],
        presence=event['presence'],
    )

    legacy_event = dict(
        type='presence',
        user_id=event['user_id'],
        email=event['email'],
        server_timestamp=event['server_timestamp'],
        presence=event['presence'],
    )
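    # Clients that registered with slim_presence get the new, compact
    # user_id-based format; all other clients still receive the legacy
    # format, which also includes the user's email.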
    for user_profile_id in users:
        for client in get_client_descriptors_for_user(user_profile_id):
            if client.accepts_event(event):
                if client.slim_presence:
                    client.add_event(slim_event)
                else:
                    client.add_event(legacy_event)

def process_event(event: Mapping[str, Any], users: Iterable[int]) -> None:
    for user_profile_id in users:
        for client in get_client_descriptors_for_user(user_profile_id):
            if client.accepts_event(event):
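                # Note: event_queue.push (reached via client.add_event)
                # makes its own shallow copy of each event, so sharing the
                # same dict across multiple queues here is safe.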
                client.add_event(event)

def process_message_update_event(event_template: Mapping[str, Any],
                                 users: Iterable[Mapping[str, Any]]) -> None:
    prior_mention_user_ids = set(event_template.get('prior_mention_user_ids', []))
    mention_user_ids = set(event_template.get('mention_user_ids', []))
    presence_idle_user_ids = set(event_template.get('presence_idle_user_ids', []))
    stream_push_user_ids = set(event_template.get('stream_push_user_ids', []))
    stream_email_user_ids = set(event_template.get('stream_email_user_ids', []))
    wildcard_mention_user_ids = set(event_template.get('wildcard_mention_user_ids', []))
    push_notify_user_ids = set(event_template.get('push_notify_user_ids', []))

    stream_name = event_template.get('stream_name')
    message_id = event_template['message_id']

    for user_data in users:
        user_profile_id = user_data['id']
        user_event = dict(event_template)  # shallow copy, but deep enough for our needs
        for key in user_data.keys():
            if key != "id":
                user_event[key] = user_data[key]

        wildcard_mentioned = 'wildcard_mentioned' in user_event['flags']
        wildcard_mention_notify = wildcard_mentioned and (
            user_profile_id in wildcard_mention_user_ids)
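        # The edit may newly mention this user; hand things off to the same
        # notification machinery that the send-message path uses.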
        maybe_enqueue_notifications_for_message_update(
            user_profile_id=user_profile_id,
            message_id=message_id,
            stream_name=stream_name,
            prior_mention_user_ids=prior_mention_user_ids,
            mention_user_ids=mention_user_ids,
            wildcard_mention_notify=wildcard_mention_notify,
            presence_idle_user_ids=presence_idle_user_ids,
            stream_push_user_ids=stream_push_user_ids,
            stream_email_user_ids=stream_email_user_ids,
            push_notify_user_ids=push_notify_user_ids,
        )

        for client in get_client_descriptors_for_user(user_profile_id):
            if client.accepts_event(user_event):
                # We need to do another shallow copy, or we risk
                # sending the same event to multiple clients.
                client.add_event(user_event)

def maybe_enqueue_notifications_for_message_update(user_profile_id: int,
                                                   message_id: int,
                                                   stream_name: Optional[str],
                                                   prior_mention_user_ids: Set[int],
                                                   mention_user_ids: Set[int],
                                                   wildcard_mention_notify: bool,
                                                   presence_idle_user_ids: Set[int],
                                                   stream_push_user_ids: Set[int],
                                                   stream_email_user_ids: Set[int],
                                                   push_notify_user_ids: Set[int]) -> None:
    private_message = (stream_name is None)

    if private_message:
        # We don't do offline notifications for PMs, because
        # we already notified the user of the original message
        return

    if (user_profile_id in prior_mention_user_ids):
        # Don't spam people with duplicate mentions.  This is
        # especially important considering that most message
        # edits are simple typo corrections.
        #
        # Note that prior_mention_user_ids contains users who received
        # a wildcard mention as well as normal mentions.
        #
        # TODO: Ideally, that would mean that we exclude here cases
        # where user_profile.wildcard_mentions_notify=False and have
        # those still send a notification.  However, we don't have the
        # data to determine whether or not that was the case at the
        # time the original message was sent, so we can't do that
        # without extending the UserMessage data model.
        return

    stream_push_notify = (user_profile_id in stream_push_user_ids)
    stream_email_notify = (user_profile_id in stream_email_user_ids)

    if stream_push_notify or stream_email_notify:
        # Currently we assume that if this flag is set to True, then
        # the user already was notified about the earlier message,
        # so we short circuit.  We may handle this more rigorously
        # in the future by looking at something like an AlreadyNotified
        # model.
        return

    # We can have newly mentioned people in an updated message.
    mentioned = (user_profile_id in mention_user_ids)

    always_push_notify = user_profile_id in push_notify_user_ids

    idle = (user_profile_id in presence_idle_user_ids) or \
        receiver_is_off_zulip(user_profile_id)

    maybe_enqueue_notifications(
        user_profile_id=user_profile_id,
        message_id=message_id,
        private_message=private_message,
        mentioned=mentioned,
        wildcard_mention_notify=wildcard_mention_notify,
        stream_push_notify=stream_push_notify,
        stream_email_notify=stream_email_notify,
        stream_name=stream_name,
        always_push_notify=always_push_notify,
        idle=idle,
        already_notified={},
    )

def process_notification(notice: Mapping[str, Any]) -> None:
    event = notice['event']  # type: Mapping[str, Any]
    users = notice['users']  # type: Union[List[int], List[Mapping[str, Any]]]
    start_time = time.time()

    if event['type'] == "message":
        process_message_event(event, cast(Iterable[Mapping[str, Any]], users))
    elif event['type'] == "update_message":
        process_message_update_event(event, cast(Iterable[Mapping[str, Any]], users))
    elif event['type'] == "delete_message" and isinstance(users[0], dict):
        # do_delete_messages used to send events with users in dict format {"id": <int>}
        # This block is here for compatibility with events in that format still in the queue
        # at the time of upgrade.
        # TODO: Remove this block in release >= 2.3.
        user_ids = [user['id'] for user in cast(Iterable[Mapping[str, int]], users)]
        process_event(event, user_ids)
    elif event['type'] == "presence":
        process_presence_event(event, cast(Iterable[int], users))
    else:
        process_event(event, cast(Iterable[int], users))
    logging.debug("Tornado: Event %s for %s users took %sms" % (
        event['type'], len(users), int(1000 * (time.time() - start_time))))

# Runs in the Django process to send a notification to Tornado.
#
# We use JSON rather than bare form parameters, so that we can represent
# different types and for compatibility with non-HTTP transports.
def send_notification_http(realm: Realm, data: Mapping[str, Any]) -> None:
    if settings.TORNADO_SERVER and not settings.RUNNING_INSIDE_TORNADO:
        tornado_uri = get_tornado_uri(realm)
        requests_client.post(tornado_uri + '/notify_tornado', data=dict(
            data=ujson.dumps(data),
            secret=settings.SHARED_SECRET))
    else:
        process_notification(data)

def send_event(realm: Realm, event: Mapping[str, Any],
               users: Union[Iterable[int], Iterable[Mapping[str, Any]]]) -> None:
    """`users` is a list of user IDs, or in the case of `message` type
    events, a list of dicts describing the users and metadata about
    the user/message pair."""
    port = get_tornado_port(realm)
    queue_json_publish(notify_tornado_queue_name(port),
                       dict(event=event, users=users),
                       lambda *args, **kwargs: send_notification_http(realm, *args, **kwargs))
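
# A minimal, hypothetical usage sketch from Django-side code (the event
# payload and the active_user_ids() helper here are illustrative
# assumptions, not the full schema):
#
#     event = dict(type='realm_emoji', op='update', realm_emoji={})
#     send_event(realm, event, active_user_ids(realm.id))
#
# However the notification is transported, it is ultimately handled by
# process_notification() in the Tornado process.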