# See http://zulip.readthedocs.io/en/latest/events-system.html for
# high-level documentation on how this system works.
from __future__ import absolute_import
from typing import cast, AbstractSet, Any, Callable, Dict, Iterable, List, \
    Mapping, MutableMapping, Optional, Sequence, Set, Text, Union

from django.utils.translation import ugettext as _
from django.conf import settings
from django.utils import timezone
from collections import deque
import datetime
import os
import time
import socket
import logging
import ujson
import requests
import atexit
import sys
import signal
import tornado.autoreload
import tornado.ioloop
import random
import traceback
from zerver.models import UserProfile, Client
from zerver.decorator import RespondAsynchronously
from zerver.tornado.handlers import clear_handler_by_id, get_handler_by_id, \
    finish_handler, handler_stats_string
from zerver.lib.utils import statsd
from zerver.middleware import async_request_restart
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.queue import queue_json_publish
from zerver.lib.request import JsonableError
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.tornado.descriptors import clear_descriptor_by_handler_id, set_descriptor_by_handler_id
import copy
import six

requests_client = requests.Session()
for host in ['127.0.0.1', 'localhost']:
    if settings.TORNADO_SERVER and host in settings.TORNADO_SERVER:
        # This seems like the only working solution to ignore proxy in
        # requests library.
        requests_client.trust_env = False

# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
IDLE_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 5

# Capped limit for how long a client can request an event queue
# to live
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60

# The heartbeats effectively act as a server-side timeout for
# get_events().  The actual timeout value is randomized for each
# client connection based on the below value.  We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
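
# Concretely: connect_handler() below schedules its heartbeat at
# time.time() + HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10), so each
# connection is heartbeated after 45-55 seconds.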

class ClientDescriptor(object):
    def __init__(self, user_profile_id, user_profile_email, realm_id, event_queue,
                 event_types, client_type_name, apply_markdown=True,
                 all_public_streams=False, lifespan_secs=0, narrow=[]):
        # type: (int, Text, int, EventQueue, Optional[Sequence[str]], Text, bool, bool, int, Iterable[Sequence[Text]]) -> None
        # These objects are serialized on shutdown and restored on restart.
        # If fields are added or semantics are changed, temporary code must be
        # added to load_event_queues() to update the restored objects.
        # Additionally, the to_dict and from_dict methods must be updated
        self.user_profile_id = user_profile_id
        self.user_profile_email = user_profile_email
        self.realm_id = realm_id
        self.current_handler_id = None  # type: Optional[int]
        self.current_client_name = None  # type: Optional[Text]
        self.event_queue = event_queue
        self.queue_timeout = lifespan_secs
        self.event_types = event_types
        self.last_connection_time = time.time()
        self.apply_markdown = apply_markdown
        self.all_public_streams = all_public_streams
        self.client_type_name = client_type_name
        self._timeout_handle = None  # type: Any # TODO: should be return type of ioloop.add_timeout
        self.narrow = narrow
        self.narrow_filter = build_narrow_filter(narrow)

        # Clamp queue_timeout to between minimum and maximum timeouts
        self.queue_timeout = max(IDLE_EVENT_QUEUE_TIMEOUT_SECS, min(self.queue_timeout, MAX_QUEUE_TIMEOUT_SECS))

    def to_dict(self):
        # type: () -> Dict[str, Any]
        # If you add a new key to this dict, make sure you add appropriate
        # migration code in from_dict or load_event_queues to account for
        # loading event queues that lack that key.
        return dict(user_profile_id=self.user_profile_id,
                    user_profile_email=self.user_profile_email,
                    realm_id=self.realm_id,
                    event_queue=self.event_queue.to_dict(),
                    queue_timeout=self.queue_timeout,
                    event_types=self.event_types,
                    last_connection_time=self.last_connection_time,
                    apply_markdown=self.apply_markdown,
                    all_public_streams=self.all_public_streams,
                    narrow=self.narrow,
                    client_type_name=self.client_type_name)

    def __repr__(self):
        # type: () -> str
        return "ClientDescriptor<%s>" % (self.event_queue.id,)

    @classmethod
    def from_dict(cls, d):
        # type: (MutableMapping[str, Any]) -> ClientDescriptor
        if 'user_profile_email' not in d:
            # Temporary migration for the addition of the new user_profile_email field
            from zerver.models import get_user_profile_by_id
            d['user_profile_email'] = get_user_profile_by_id(d['user_profile_id']).email
        if 'client_type' in d:
            # Temporary migration for the rename of client_type to client_type_name
            d['client_type_name'] = d['client_type']
        ret = cls(d['user_profile_id'], d['user_profile_email'], d['realm_id'],
                  EventQueue.from_dict(d['event_queue']), d['event_types'],
                  d['client_type_name'], d['apply_markdown'], d['all_public_streams'],
                  d['queue_timeout'], d.get('narrow', []))
        ret.last_connection_time = d['last_connection_time']
        return ret

    def prepare_for_pickling(self):
        # type: () -> None
        self.current_handler_id = None
        self._timeout_handle = None

    def add_event(self, event):
        # type: (Dict[str, Any]) -> None
        if self.current_handler_id is not None:
            handler = get_handler_by_id(self.current_handler_id)
            async_request_restart(handler._request)

        self.event_queue.push(event)
        self.finish_current_handler()

    def finish_current_handler(self):
        # type: () -> bool
        if self.current_handler_id is not None:
            err_msg = "Got error finishing handler for queue %s" % (self.event_queue.id,)
            try:
                finish_handler(self.current_handler_id, self.event_queue.id,
                               self.event_queue.contents(), self.apply_markdown)
            except Exception:
                logging.exception(err_msg)
            finally:
                self.disconnect_handler()
            return True
        return False

    def accepts_event(self, event):
        # type: (Mapping[str, Any]) -> bool
        if self.event_types is not None and event["type"] not in self.event_types:
            return False
        if event["type"] == "message":
            return self.narrow_filter(event)
        return True

    # TODO: Refactor so we don't need this function
    def accepts_messages(self):
        # type: () -> bool
        return self.event_types is None or "message" in self.event_types

    def idle(self, now):
        # type: (float) -> bool
        if not hasattr(self, 'queue_timeout'):
            self.queue_timeout = IDLE_EVENT_QUEUE_TIMEOUT_SECS

        return (self.current_handler_id is None and
                now - self.last_connection_time >= self.queue_timeout)

    def connect_handler(self, handler_id, client_name):
        # type: (int, Text) -> None
        self.current_handler_id = handler_id
        self.current_client_name = client_name
        set_descriptor_by_handler_id(handler_id, self)
        self.last_connection_time = time.time()

        def timeout_callback():
            # type: () -> None
            self._timeout_handle = None
            # All clients get heartbeat events
            self.add_event(dict(type='heartbeat'))
        ioloop = tornado.ioloop.IOLoop.instance()
        heartbeat_time = time.time() + HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10)
        if self.client_type_name != 'API: heartbeat test':
            self._timeout_handle = ioloop.add_timeout(heartbeat_time, timeout_callback)

    def disconnect_handler(self, client_closed=False):
        # type: (bool) -> None
        if self.current_handler_id:
            clear_descriptor_by_handler_id(self.current_handler_id, None)
            clear_handler_by_id(self.current_handler_id)
            if client_closed:
                logging.info("Client disconnected for queue %s (%s via %s)" %
                             (self.event_queue.id, self.user_profile_email,
                              self.current_client_name))
        self.current_handler_id = None
        self.current_client_name = None
        if self._timeout_handle is not None:
            ioloop = tornado.ioloop.IOLoop.instance()
            ioloop.remove_timeout(self._timeout_handle)
            self._timeout_handle = None

    def cleanup(self):
        # type: () -> None
        # Before we can GC the event queue, we need to disconnect the
        # handler and notify the client (or connection server) so that
        # they can cleanup their own state related to the GC'd event
        # queue.  Finishing the handler before we GC ensures the
        # invariant that event queues are idle when passed to
        # `do_gc_event_queues` is preserved.
        self.finish_current_handler()
        do_gc_event_queues({self.event_queue.id}, {self.user_profile_id},
                           {self.realm_id})

def compute_full_event_type(event):
    # type: (Mapping[str, Any]) -> str
    if event["type"] == "update_message_flags":
        if event["all"]:
            # Put the "all" case in its own category
            return "all_flags/%s/%s" % (event["flag"], event["operation"])
        return "flags/%s/%s" % (event["operation"], event["flag"])
    return event["type"]
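
# Illustrative examples of compute_full_event_type's mapping, derived from
# the branches above:
#   {'type': 'update_message_flags', 'all': True,
#    'operation': 'add', 'flag': 'read'}    -> "all_flags/read/add"
#   {'type': 'update_message_flags', 'all': False,
#    'operation': 'add', 'flag': 'read'}    -> "flags/add/read"
#   {'type': 'message', ...}                -> "message"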

class EventQueue(object):
    def __init__(self, id):
        # type: (str) -> None
        self.queue = deque()  # type: deque[Dict[str, Any]]
        self.next_event_id = 0  # type: int
        self.id = id  # type: str
        self.virtual_events = {}  # type: Dict[str, Dict[str, Any]]

    def to_dict(self):
        # type: () -> Dict[str, Any]
        # If you add a new key to this dict, make sure you add appropriate
        # migration code in from_dict or load_event_queues to account for
        # loading event queues that lack that key.
        return dict(id=self.id,
                    next_event_id=self.next_event_id,
                    queue=list(self.queue),
                    virtual_events=self.virtual_events)

    @classmethod
    def from_dict(cls, d):
        # type: (Dict[str, Any]) -> EventQueue
        ret = cls(d['id'])
        ret.next_event_id = d['next_event_id']
        ret.queue = deque(d['queue'])
        ret.virtual_events = d.get("virtual_events", {})
        return ret

    def push(self, event):
        # type: (Dict[str, Any]) -> None
        event['id'] = self.next_event_id
        self.next_event_id += 1
        full_event_type = compute_full_event_type(event)
        if (full_event_type in ["pointer", "restart"] or
                full_event_type.startswith("flags/")):
            if full_event_type not in self.virtual_events:
                self.virtual_events[full_event_type] = copy.deepcopy(event)
                return
            # Update the virtual event with the values from the event
            virtual_event = self.virtual_events[full_event_type]
            virtual_event["id"] = event["id"]
            if "timestamp" in event:
                virtual_event["timestamp"] = event["timestamp"]
            if full_event_type == "pointer":
                virtual_event["pointer"] = event["pointer"]
            elif full_event_type == "restart":
                virtual_event["server_generation"] = event["server_generation"]
            elif full_event_type.startswith("flags/"):
                virtual_event["messages"] += event["messages"]
        else:
            self.queue.append(event)
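
    # Illustration of the virtual-event collapsing above (hypothetical
    # sequence): pushing two "flags/add/read" events with messages=[1, 2]
    # and then messages=[3] leaves a single virtual event whose "messages"
    # list is [1, 2, 3] and whose "id" is that of the latest event;
    # contents() later merges it back into the queue at that position.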

    # Note that pop ignores virtual events.  This is fine in our
    # current usage since virtual events should always be resolved to
    # a real event before being given to users.
    def pop(self):
        # type: () -> Dict[str, Any]
        return self.queue.popleft()

    def empty(self):
        # type: () -> bool
        return len(self.queue) == 0 and len(self.virtual_events) == 0

    # See the comment on pop; that applies here as well
    def prune(self, through_id):
        # type: (int) -> None
        while len(self.queue) != 0 and self.queue[0]['id'] <= through_id:
            self.pop()
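
    # A worked example for contents() below (hypothetical state): with real
    # events of ids [1, 4] in self.queue and virtual events of ids [2, 3],
    # contents() returns the events ordered [1, 2, 3, 4], clears
    # virtual_events, and replaces self.queue with the merged list.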

    def contents(self):
        # type: () -> List[Dict[str, Any]]
        contents = []  # type: List[Dict[str, Any]]
        virtual_id_map = {}  # type: Dict[str, Dict[str, Any]]
        for event_type in self.virtual_events:
            virtual_id_map[self.virtual_events[event_type]["id"]] = self.virtual_events[event_type]
        virtual_ids = sorted(list(virtual_id_map.keys()))

        # Merge the virtual events into their final place in the queue
        index = 0
        length = len(virtual_ids)
        for event in self.queue:
            while index < length and virtual_ids[index] < event["id"]:
                contents.append(virtual_id_map[virtual_ids[index]])
                index += 1
            contents.append(event)
        while index < length:
            contents.append(virtual_id_map[virtual_ids[index]])
            index += 1

        self.virtual_events = {}
        self.queue = deque(contents)
        return contents

# maps queue ids to client descriptors
clients = {}  # type: Dict[str, ClientDescriptor]
# maps user id to list of client descriptors
user_clients = {}  # type: Dict[int, List[ClientDescriptor]]
# maps realm id to list of client descriptors with all_public_streams=True
realm_clients_all_streams = {}  # type: Dict[int, List[ClientDescriptor]]

# list of registered gc hooks.
# each one will be called with a user profile id, queue, and bool
# last_for_client that is true if this is the last queue pertaining
# to this user_profile_id that is about to be deleted
gc_hooks = []  # type: List[Callable[[int, ClientDescriptor, bool], None]]

next_queue_id = 0

def add_client_gc_hook(hook):
    # type: (Callable[[int, ClientDescriptor, bool], None]) -> None
    gc_hooks.append(hook)

def get_client_descriptor(queue_id):
    # type: (str) -> Optional[ClientDescriptor]
    return clients.get(queue_id)

def get_client_descriptors_for_user(user_profile_id):
    # type: (int) -> List[ClientDescriptor]
    return user_clients.get(user_profile_id, [])

def get_client_descriptors_for_realm_all_streams(realm_id):
    # type: (int) -> List[ClientDescriptor]
    return realm_clients_all_streams.get(realm_id, [])

def add_to_client_dicts(client):
    # type: (ClientDescriptor) -> None
    user_clients.setdefault(client.user_profile_id, []).append(client)
    if client.all_public_streams or client.narrow != []:
        realm_clients_all_streams.setdefault(client.realm_id, []).append(client)

def allocate_client_descriptor(new_queue_data):
    # type: (MutableMapping[str, Any]) -> ClientDescriptor
    global next_queue_id
    queue_id = str(settings.SERVER_GENERATION) + ':' + str(next_queue_id)
    next_queue_id += 1
    new_queue_data["event_queue"] = EventQueue(queue_id).to_dict()
    client = ClientDescriptor.from_dict(new_queue_data)
    clients[queue_id] = client
    add_to_client_dicts(client)
    return client
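
# Queue ids allocated above look like "<SERVER_GENERATION>:<counter>", e.g.
# "1487000000:0" (illustrative values); the generation prefix keeps ids from
# colliding across server restarts.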

def do_gc_event_queues(to_remove, affected_users, affected_realms):
    # type: (AbstractSet[str], AbstractSet[int], AbstractSet[int]) -> None
    def filter_client_dict(client_dict, key):
        # type: (MutableMapping[int, List[ClientDescriptor]], int) -> None
        if key not in client_dict:
            return

        new_client_list = [c for c in client_dict[key] if c.event_queue.id not in to_remove]
        if len(new_client_list) == 0:
            del client_dict[key]
        else:
            client_dict[key] = new_client_list

    for user_id in affected_users:
        filter_client_dict(user_clients, user_id)

    for realm_id in affected_realms:
        filter_client_dict(realm_clients_all_streams, realm_id)

    for id in to_remove:
        for cb in gc_hooks:
            cb(clients[id].user_profile_id, clients[id], clients[id].user_profile_id not in user_clients)
        del clients[id]

def gc_event_queues():
    # type: () -> None
    start = time.time()
    to_remove = set()  # type: Set[str]
    affected_users = set()  # type: Set[int]
    affected_realms = set()  # type: Set[int]
    for (id, client) in six.iteritems(clients):
        if client.idle(start):
            to_remove.add(id)
            affected_users.add(client.user_profile_id)
            affected_realms.add(client.realm_id)

    # We don't need to call e.g. finish_current_handler on the clients
    # being removed because they are guaranteed to be idle and thus
    # not have a current handler.
    do_gc_event_queues(to_remove, affected_users, affected_realms)

    logging.info(('Tornado removed %d idle event queues owned by %d users in %.3fs.' +
                  ' Now %d active queues, %s')
                 % (len(to_remove), len(affected_users), time.time() - start,
                    len(clients), handler_stats_string()))
    statsd.gauge('tornado.active_queues', len(clients))
    statsd.gauge('tornado.active_users', len(user_clients))

def dump_event_queues():
    # type: () -> None
    start = time.time()

    with open(settings.JSON_PERSISTENT_QUEUE_FILENAME, "w") as stored_queues:
        ujson.dump([(qid, client.to_dict()) for (qid, client) in six.iteritems(clients)],
                   stored_queues)

    logging.info('Tornado dumped %d event queues in %.3fs'
                 % (len(clients), time.time() - start))

def load_event_queues():
    # type: () -> None
    global clients
    start = time.time()

    # ujson chokes on bad input pretty easily.  We separate out the actual
    # file reading from the loading so that we don't silently fail if we get
    # bad input.
    try:
        with open(settings.JSON_PERSISTENT_QUEUE_FILENAME, "r") as stored_queues:
            json_data = stored_queues.read()
        try:
            clients = dict((qid, ClientDescriptor.from_dict(client))
                           for (qid, client) in ujson.loads(json_data))
        except Exception:
            logging.exception("Could not deserialize event queues")
    except (IOError, EOFError):
        pass

    for client in six.itervalues(clients):
        # Put code for migrations due to event queue data format changes here

        add_to_client_dicts(client)

    logging.info('Tornado loaded %d event queues in %.3fs'
                 % (len(clients), time.time() - start))

def send_restart_events(immediate=False):
    # type: (bool) -> None
    event = dict(type='restart', server_generation=settings.SERVER_GENERATION)  # type: Dict[str, Any]
    if immediate:
        event['immediate'] = True
    for client in six.itervalues(clients):
        if client.accepts_event(event):
            client.add_event(event.copy())

def setup_event_queue():
    # type: () -> None
    if not settings.TEST_SUITE:
        load_event_queues()
        atexit.register(dump_event_queues)
        # Make sure we dump event queues even if we exit via signal
        signal.signal(signal.SIGTERM, lambda signum, stack: sys.exit(1))
        tornado.autoreload.add_reload_hook(dump_event_queues)  # type: ignore # TODO: Fix missing tornado.autoreload stub

    try:
        os.rename(settings.JSON_PERSISTENT_QUEUE_FILENAME, "/var/tmp/event_queues.json.last")
    except OSError:
        pass

    # Set up event queue garbage collection
    ioloop = tornado.ioloop.IOLoop.instance()
    pc = tornado.ioloop.PeriodicCallback(gc_event_queues,
                                         EVENT_QUEUE_GC_FREQ_MSECS, ioloop)
    pc.start()

    send_restart_events(immediate=settings.DEVELOPMENT)

def fetch_events(query):
    # type: (Mapping[str, Any]) -> Dict[str, Any]
    queue_id = query["queue_id"]  # type: str
    dont_block = query["dont_block"]  # type: bool
    last_event_id = query["last_event_id"]  # type: int
    user_profile_id = query["user_profile_id"]  # type: int
    new_queue_data = query.get("new_queue_data")  # type: Optional[MutableMapping[str, Any]]
    user_profile_email = query["user_profile_email"]  # type: Text
    client_type_name = query["client_type_name"]  # type: Text
    handler_id = query["handler_id"]  # type: int

    try:
        was_connected = False
        orig_queue_id = queue_id
        extra_log_data = ""
        if queue_id is None:
            if dont_block:
                client = allocate_client_descriptor(new_queue_data)
                queue_id = client.event_queue.id
            else:
                raise JsonableError(_("Missing 'queue_id' argument"))
        else:
            if last_event_id is None:
                raise JsonableError(_("Missing 'last_event_id' argument"))
            client = get_client_descriptor(queue_id)
            if client is None:
                raise JsonableError(_("Bad event queue id: %s") % (queue_id,))
            if user_profile_id != client.user_profile_id:
                raise JsonableError(_("You are not authorized to get events from this queue"))
            client.event_queue.prune(last_event_id)
            was_connected = client.finish_current_handler()

        if not client.event_queue.empty() or dont_block:
            response = dict(events=client.event_queue.contents(),
                            handler_id=handler_id)  # type: Dict[str, Any]
            if orig_queue_id is None:
                response['queue_id'] = queue_id
            if len(response["events"]) == 1:
                extra_log_data = "[%s/%s/%s]" % (queue_id, len(response["events"]),
                                                 response["events"][0]["type"])
            else:
                extra_log_data = "[%s/%s]" % (queue_id, len(response["events"]))
            if was_connected:
                extra_log_data += " [was connected]"
            return dict(type="response", response=response, extra_log_data=extra_log_data)

        # After this point, dont_block=False, the queue is empty, and we
        # have a pre-existing queue, so we wait for new events.
        if was_connected:
            logging.info("Disconnected handler for queue %s (%s/%s)" % (queue_id, user_profile_email,
                                                                        client_type_name))
    except JsonableError as e:
        if hasattr(e, 'to_json_error_msg') and callable(e.to_json_error_msg):
            return dict(type="error", handler_id=handler_id,
                        message=e.to_json_error_msg())
        raise e

    client.connect_handler(handler_id, client_type_name)
    return dict(type="async")
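
# fetch_events returns one of three dict shapes, per the code above:
#   {'type': 'response', 'response': {...}, 'extra_log_data': '...'}
#   {'type': 'error', 'handler_id': ..., 'message': '...'}
#   {'type': 'async'}   # handler stays connected, waiting for new events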

# The following functions are called from Django

# Workaround to support the Python-requests 1.0 transition of .json
# from a property to a function
requests_json_is_function = callable(requests.Response.json)
def extract_json_response(resp):
    # type: (requests.Response) -> Dict[str, Any]
    if requests_json_is_function:
        return resp.json()
    else:
        return resp.json  # type: ignore # mypy trusts the stub, not the runtime type checking of this fn

def request_event_queue(user_profile, user_client, apply_markdown,
                        queue_lifespan_secs, event_types=None, all_public_streams=False,
                        narrow=[]):
    # type: (UserProfile, Client, bool, int, Optional[Iterable[str]], bool, Iterable[Sequence[Text]]) -> Optional[str]
    if settings.TORNADO_SERVER:
        req = {'dont_block': 'true',
               'apply_markdown': ujson.dumps(apply_markdown),
               'all_public_streams': ujson.dumps(all_public_streams),
               'client': 'internal',
               'user_client': user_client.name,
               'narrow': ujson.dumps(narrow),
               'lifespan_secs': queue_lifespan_secs}
        if event_types is not None:
            req['event_types'] = ujson.dumps(event_types)

        try:
            resp = requests_client.get(settings.TORNADO_SERVER + '/api/v1/events',
                                       auth=requests.auth.HTTPBasicAuth(
                                           user_profile.email, user_profile.api_key),
                                       params=req)
        except requests.adapters.ConnectionError:
            logging.error('Tornado server does not seem to be running, check %s '
                          'and %s for more information.' %
                          (settings.ERROR_FILE_LOG_PATH, "tornado.log"))
            raise requests.adapters.ConnectionError(
                "Django cannot connect to Tornado server (%s); try restarting" %
                (settings.TORNADO_SERVER))

        resp.raise_for_status()

        return extract_json_response(resp)['queue_id']

    return None

def get_user_events(user_profile, queue_id, last_event_id):
    # type: (UserProfile, str, int) -> List[Dict]
    if settings.TORNADO_SERVER:
        resp = requests_client.get(settings.TORNADO_SERVER + '/api/v1/events',
                                   auth=requests.auth.HTTPBasicAuth(
                                       user_profile.email, user_profile.api_key),
                                   params={'queue_id': queue_id,
                                           'last_event_id': last_event_id,
                                           'dont_block': 'true',
                                           'client': 'internal'})

        resp.raise_for_status()

        return extract_json_response(resp)['events']

# Send email notifications to idle users
# after they are idle for 1 hour
NOTIFY_AFTER_IDLE_HOURS = 1
def build_offline_notification(user_profile_id, message_id):
    # type: (int, int) -> Dict[str, Any]
    return {"user_profile_id": user_profile_id,
            "message_id": message_id,
            "timestamp": time.time()}
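
# For example, build_offline_notification(42, 1234) returns (with an
# illustrative timestamp):
#   {'user_profile_id': 42, 'message_id': 1234, 'timestamp': 1487000000.0}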

def missedmessage_hook(user_profile_id, queue, last_for_client):
    # type: (int, ClientDescriptor, bool) -> None
    # Only process missedmessage hook when the last queue for a
    # client has been garbage collected
    if not last_for_client:
        return

    message_ids_to_notify = []  # type: List[Dict[str, Any]]
    for event in queue.event_queue.contents():
        if not event['type'] == 'message' or not event['flags']:
            continue

        if 'mentioned' in event['flags'] and 'read' not in event['flags']:
            notify_info = dict(message_id=event['message']['id'])

            if not event.get('push_notified', False):
                notify_info['send_push'] = True
            if not event.get('email_notified', False):
                notify_info['send_email'] = True
            message_ids_to_notify.append(notify_info)

    for notify_info in message_ids_to_notify:
        msg_id = notify_info['message_id']
        notice = build_offline_notification(user_profile_id, msg_id)
        if notify_info.get('send_push', False):
            queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
        if notify_info.get('send_email', False):
            queue_json_publish("missedmessage_emails", notice, lambda notice: None)

def receiver_is_idle(user_profile_id, realm_presences):
    # type: (int, Optional[Dict[int, Dict[Text, Dict[str, Any]]]]) -> bool
    # If a user has no message-receiving event queues, they've got no open zulip
    # session so we notify them
    all_client_descriptors = get_client_descriptors_for_user(user_profile_id)
    message_event_queues = [client for client in all_client_descriptors if client.accepts_messages()]
    off_zulip = len(message_event_queues) == 0

    # It's possible a recipient is not in the realm of a sender. We don't have
    # presence information in this case (and it's hard to get without an additional
    # db query) so we simply don't try to guess if this cross-realm recipient
    # has been idle for too long
    if realm_presences is None or user_profile_id not in realm_presences:
        return off_zulip

    # We want to find the newest "active" presence entity and compare that to the
    # activity expiry threshold.
    user_presence = realm_presences[user_profile_id]
    latest_active_timestamp = None
    idle = False

    for client, status in six.iteritems(user_presence):
        if (latest_active_timestamp is None or status['timestamp'] > latest_active_timestamp) and \
                status['status'] == 'active':
            latest_active_timestamp = status['timestamp']

    if latest_active_timestamp is None:
        idle = True
    else:
        active_datetime = timestamp_to_datetime(latest_active_timestamp)
        # 140 seconds is consistent with activity.js:OFFLINE_THRESHOLD_SECS
        idle = timezone.now() - active_datetime > datetime.timedelta(seconds=140)

    return off_zulip or idle

def process_message_event(event_template, users):
    # type: (Mapping[str, Any], Iterable[Mapping[str, Any]]) -> None
    realm_presences = {int(k): v for k, v in event_template['presences'].items()}  # type: Dict[int, Dict[Text, Dict[str, Any]]]
    sender_queue_id = event_template.get('sender_queue_id', None)  # type: Optional[str]
    message_dict_markdown = event_template['message_dict_markdown']  # type: Dict[str, Any]
    message_dict_no_markdown = event_template['message_dict_no_markdown']  # type: Dict[str, Any]
    sender_id = message_dict_markdown['sender_id']  # type: int
    message_id = message_dict_markdown['id']  # type: int
    message_type = message_dict_markdown['type']  # type: str
    sending_client = message_dict_markdown['client']  # type: Text

    # To remove duplicate clients: Maps queue ID to {'client': Client, 'flags': flags}
    send_to_clients = {}  # type: Dict[str, Dict[str, Any]]

    # Extra user-specific data to include
    extra_user_data = {}  # type: Dict[int, Any]

    if 'stream_name' in event_template and not event_template.get("invite_only"):
        for client in get_client_descriptors_for_realm_all_streams(event_template['realm_id']):
            send_to_clients[client.event_queue.id] = {'client': client, 'flags': None}
            if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
                send_to_clients[client.event_queue.id]['is_sender'] = True

    for user_data in users:
        user_profile_id = user_data['id']  # type: int
        flags = user_data.get('flags', [])  # type: Iterable[str]

        for client in get_client_descriptors_for_user(user_profile_id):
            send_to_clients[client.event_queue.id] = {'client': client, 'flags': flags}
            if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
                send_to_clients[client.event_queue.id]['is_sender'] = True

        # If the recipient was offline and the message was a private message
        # (one-on-one or group) to them, or they were @-mentioned, potentially
        # notify them more immediately.
        received_pm = message_type == "private" and user_profile_id != sender_id
        mentioned = 'mentioned' in flags
        idle = receiver_is_idle(user_profile_id, realm_presences)
        always_push_notify = user_data.get('always_push_notify', False)
        if (received_pm or mentioned) and (idle or always_push_notify):
            notice = build_offline_notification(user_profile_id, message_id)
            queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
            notified = dict(push_notified=True)  # type: Dict[str, bool]
            # Don't send missed message emails if always_push_notify is True
            if idle:
                # We require RabbitMQ to do this, as we can't call the email handler
                # from the Tornado process. So if there's no rabbitmq support do nothing
                queue_json_publish("missedmessage_emails", notice, lambda notice: None)
                notified['email_notified'] = True

            extra_user_data[user_profile_id] = notified

    for client_data in six.itervalues(send_to_clients):
        client = client_data['client']
        flags = client_data['flags']
        is_sender = client_data.get('is_sender', False)  # type: bool
        extra_data = extra_user_data.get(client.user_profile_id, None)  # type: Optional[Mapping[str, bool]]

        if not client.accepts_messages():
            # The actual check is the accepts_event() check below;
            # this line is just an optimization to avoid copying
            # message data unnecessarily
            continue

        if client.apply_markdown:
            message_dict = message_dict_markdown
        else:
            message_dict = message_dict_no_markdown

        # Make sure Zephyr mirroring bots know whether stream is invite-only
        if "mirror" in client.client_type_name and event_template.get("invite_only"):
            message_dict = message_dict.copy()
            message_dict["invite_only_stream"] = True

        if flags is not None:
            message_dict['is_mentioned'] = 'mentioned' in flags
        user_event = dict(type='message', message=message_dict, flags=flags)  # type: Dict[str, Any]
        if extra_data is not None:
            user_event.update(extra_data)

        if is_sender:
            local_message_id = event_template.get('local_id', None)
            if local_message_id is not None:
                user_event["local_message_id"] = local_message_id

        if not client.accepts_event(user_event):
            continue

        # The below prevents (Zephyr) mirroring loops.
        if ('mirror' in sending_client and
                sending_client.lower() == client.client_type_name.lower()):
            continue
        client.add_event(user_event)

def process_event(event, users):
    # type: (Mapping[str, Any], Iterable[int]) -> None
    for user_profile_id in users:
        for client in get_client_descriptors_for_user(user_profile_id):
            if client.accepts_event(event):
                client.add_event(dict(event))

def process_userdata_event(event_template, users):
    # type: (Mapping[str, Any], Iterable[Mapping[str, Any]]) -> None
    for user_data in users:
        user_profile_id = user_data['id']
        user_event = dict(event_template)  # shallow copy, but deep enough for our needs
        for key in user_data.keys():
            if key != "id":
                user_event[key] = user_data[key]

        for client in get_client_descriptors_for_user(user_profile_id):
            if client.accepts_event(user_event):
                client.add_event(user_event)

def process_notification(notice):
    # type: (Mapping[str, Any]) -> None
    event = notice['event']  # type: Mapping[str, Any]
    users = notice['users']  # type: Union[Iterable[int], Iterable[Mapping[str, Any]]]
    if event['type'] in ["update_message"]:
        process_userdata_event(event, cast(Iterable[Mapping[str, Any]], users))
    elif event['type'] == "message":
        process_message_event(event, cast(Iterable[Mapping[str, Any]], users))
    else:
        process_event(event, cast(Iterable[int], users))

# Runs in the Django process to send a notification to Tornado.
#
# We use JSON rather than bare form parameters, so that we can represent
# different types and for compatibility with non-HTTP transports.

def send_notification_http(data):
    # type: (Mapping[str, Any]) -> None
    if settings.TORNADO_SERVER and not settings.RUNNING_INSIDE_TORNADO:
        requests_client.post(settings.TORNADO_SERVER + '/notify_tornado', data=dict(
            data=ujson.dumps(data),
            secret=settings.SHARED_SECRET))
    else:
        process_notification(data)

def send_notification(data):
    # type: (Mapping[str, Any]) -> None
    queue_json_publish("notify_tornado", data, send_notification_http)

def send_event(event, users):
    # type: (Mapping[str, Any], Union[Iterable[int], Iterable[Mapping[str, Any]]]) -> None
    """`users` is a list of user IDs, or in the case of `message` type
    events, a list of dicts describing the users and metadata about
    the user/message pair."""
    queue_json_publish("notify_tornado",
                       dict(event=event, users=users),
                       send_notification_http)
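
# Hypothetical usage from Django code, for illustration: for most event
# types, `users` is a list of user ids, e.g.
#     send_event(dict(type='realm_emoji', op='update'), [42, 43])
# while for 'message' events each entry is a dict such as
#     {'id': 42, 'flags': ['mentioned']}
# (see process_message_event above).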