# Documented in https://zulip.readthedocs.io/en/latest/subsystems/queuing.html
from typing import Any, Callable, Dict, List, Mapping, Optional, cast, TypeVar, Type

import copy
import signal
import tempfile
from functools import wraps
from threading import Timer

import smtplib
import socket

from django.conf import settings
from django.db import connection
from django.core.handlers.wsgi import WSGIRequest
from django.core.handlers.base import BaseHandler
from zerver.models import \
    get_client, get_system_bot, PreregistrationUser, \
    get_user_profile_by_id, Message, Realm, UserMessage, UserProfile, \
    Client
from zerver.lib.context_managers import lockfile
from zerver.lib.error_notify import do_report_error
from zerver.lib.feedback import handle_feedback
from zerver.lib.queue import SimpleQueueClient, queue_json_publish, retry_event
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.lib.email_notifications import handle_missedmessage_emails
from zerver.lib.push_notifications import handle_push_notification, handle_remove_push_notification, \
    initialize_push_notifications
from zerver.lib.actions import do_send_confirmation_email, \
    do_update_user_activity, do_update_user_activity_interval, do_update_user_presence, \
    internal_send_message, internal_send_private_message, notify_realm_export, \
    render_incoming_message, do_update_embedded_data, do_mark_stream_messages_as_read
from zerver.lib.url_preview import preview as url_preview
from zerver.lib.digest import handle_digest_email
from zerver.lib.send_email import send_future_email, send_email_from_dict, \
    FromAddress, EmailNotDeliveredException, handle_send_email_format_changes
from zerver.lib.email_mirror import process_message as mirror_email, rate_limit_mirror_by_realm, \
    is_missed_message_address, extract_and_validate
from zerver.lib.streams import access_stream_by_id
from zerver.tornado.socket import req_redis_key, respond_send_message
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.db import reset_queries
from zerver.lib.redis_utils import get_redis_client
from zerver.context_processors import common_context
from zerver.lib.outgoing_webhook import do_rest_call, get_outgoing_webhook_service_handler
from zerver.models import get_bot_services, RealmAuditLog
from zulip_bots.lib import ExternalBotHandler, extract_query_without_mention
from zerver.lib.bot_lib import EmbeddedBotHandler, get_bot_handler, EmbeddedBotQuitException
from zerver.lib.exceptions import RateLimited
from zerver.lib.export import export_realm_wrapper

import os
import sys
import ujson
from collections import defaultdict
import email
import time
import datetime
import logging
import requests
from io import StringIO
import urllib

logger = logging.getLogger(__name__)

class WorkerDeclarationException(Exception):
    pass

ConcreteQueueWorker = TypeVar('ConcreteQueueWorker', bound='QueueProcessingWorker')

def assign_queue(
        queue_name: str, enabled: bool=True, queue_type: str="consumer"
) -> Callable[[Type[ConcreteQueueWorker]], Type[ConcreteQueueWorker]]:
    def decorate(clazz: Type[ConcreteQueueWorker]) -> Type[ConcreteQueueWorker]:
        clazz.queue_name = queue_name
        if enabled:
            register_worker(queue_name, clazz, queue_type)
        return clazz
    return decorate

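# A minimal usage sketch for assign_queue (hypothetical worker, for
# illustration only; the real workers registered this way appear below):
#
#     @assign_queue('example')
#     class ExampleWorker(QueueProcessingWorker):
#         def consume(self, event: Dict[str, Any]) -> None:
#             logging.info("Processing event: %s" % (event,))
#
# Once registered, get_worker('example') instantiates the class, and
# get_active_worker_queues() will include 'example' in its listing.
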
worker_classes = {}  # type: Dict[str, Type[QueueProcessingWorker]]
queues = {}  # type: Dict[str, Dict[str, Type[QueueProcessingWorker]]]
def register_worker(queue_name: str, clazz: Type['QueueProcessingWorker'], queue_type: str) -> None:
    if queue_type not in queues:
        queues[queue_type] = {}
    queues[queue_type][queue_name] = clazz
    worker_classes[queue_name] = clazz

def get_worker(queue_name: str) -> 'QueueProcessingWorker':
    return worker_classes[queue_name]()

def get_active_worker_queues(queue_type: Optional[str]=None) -> List[str]:
    """Returns all the non-test worker queues."""
    if queue_type is None:
        return list(worker_classes.keys())
    return list(queues[queue_type].keys())

def check_and_send_restart_signal() -> None:
    try:
        if not connection.is_usable():
            logging.warning("*** Sending self SIGUSR1 to trigger a restart.")
            os.kill(os.getpid(), signal.SIGUSR1)
    except Exception:
        pass

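# Note on the restart mechanism above: this file only sends SIGUSR1 to
# itself; it assumes the code that launches these workers installs a
# SIGUSR1 handler that exits, and that the process manager (supervisord,
# in a standard Zulip deployment) then restarts the worker with a fresh
# database connection.
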
def retry_send_email_failures(
        func: Callable[[ConcreteQueueWorker, Dict[str, Any]], None]
) -> Callable[['QueueProcessingWorker', Dict[str, Any]], None]:

    @wraps(func)
    def wrapper(worker: ConcreteQueueWorker, data: Dict[str, Any]) -> None:
        try:
            func(worker, data)
        except (smtplib.SMTPServerDisconnected, socket.gaierror, EmailNotDeliveredException):
            def on_failure(event: Dict[str, Any]) -> None:
                logging.exception("Event {} failed".format(event))

            retry_event(worker.queue_name, data, on_failure)

    return wrapper

class QueueProcessingWorker:
    queue_name = None  # type: str

    def __init__(self) -> None:
        self.q = None  # type: SimpleQueueClient
        if self.queue_name is None:
            raise WorkerDeclarationException("Queue worker declared without queue_name")

    def consume(self, data: Dict[str, Any]) -> None:
        raise WorkerDeclarationException("No consumer defined!")

    def consume_wrapper(self, data: Dict[str, Any]) -> None:
        try:
            self.consume(data)
        except Exception:
            self._log_problem()
            if not os.path.exists(settings.QUEUE_ERROR_DIR):
                os.mkdir(settings.QUEUE_ERROR_DIR)  # nocoverage
            fname = '%s.errors' % (self.queue_name,)
            fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
            line = '%s\t%s\n' % (time.asctime(), ujson.dumps(data))
            lock_fn = fn + '.lock'
            with lockfile(lock_fn):
                with open(fn, 'ab') as f:
                    f.write(line.encode('utf-8'))
            check_and_send_restart_signal()
        finally:
            reset_queries()

    def _log_problem(self) -> None:
        logging.exception("Problem handling data on queue %s" % (self.queue_name,))

    def setup(self) -> None:
        self.q = SimpleQueueClient()

    def start(self) -> None:
        self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
        self.q.start_consuming()

    def stop(self) -> None:  # nocoverage
        self.q.stop_consuming()

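# The error log written by consume_wrapper() above has one line per failed
# event: time.asctime(), a tab, then the event as JSON, appended to
# QUEUE_ERROR_DIR/<queue_name>.errors.  For example (hypothetical values):
#
#     Mon Aug  5 12:00:00 2019    {"user_profile_id": 10, ...}
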
class LoopQueueProcessingWorker(QueueProcessingWorker):
    sleep_delay = 0

    def start(self) -> None:  # nocoverage
        while True:
            # TODO: Probably it'd be better to share code with consume_wrapper()
            events = self.q.drain_queue(self.queue_name, json=True)
            try:
                self.consume_batch(events)
            finally:
                reset_queries()
            time.sleep(self.sleep_delay)

    def consume_batch(self, events: List[Dict[str, Any]]) -> None:
        raise NotImplementedError

    def consume(self, event: Dict[str, Any]) -> None:
        """In LoopQueueProcessingWorker, consume is used just for automated tests"""
        self.consume_batch([event])

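# A minimal LoopQueueProcessingWorker subclass sketch (hypothetical, for
# illustration; SlowQueryWorker below is a real example):
#
#     @assign_queue('example_loop', queue_type="loop")
#     class ExampleLoopWorker(LoopQueueProcessingWorker):
#         sleep_delay = 10  # drain the queue every 10 seconds
#
#         def consume_batch(self, events: List[Dict[str, Any]]) -> None:
#             for event in events:
#                 logging.info("Drained event: %s" % (event,))
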
@assign_queue('signups')
class SignupWorker(QueueProcessingWorker):
    def consume(self, data: Dict[str, Any]) -> None:
        # TODO: This is the only implementation with Dict cf Mapping; should we simplify?
        user_profile = get_user_profile_by_id(data['user_id'])
        logging.info("Processing signup for user %s in realm %s" % (
            user_profile.email, user_profile.realm.string_id))
        if settings.MAILCHIMP_API_KEY and settings.PRODUCTION:
            endpoint = "https://%s.api.mailchimp.com/3.0/lists/%s/members" % \
                (settings.MAILCHIMP_API_KEY.split('-')[1], settings.ZULIP_FRIENDS_LIST_ID)
            params = dict(data)
            del params['user_id']
            params['list_id'] = settings.ZULIP_FRIENDS_LIST_ID
            params['status'] = 'subscribed'
            r = requests.post(endpoint, auth=('apikey', settings.MAILCHIMP_API_KEY), json=params, timeout=10)
            if r.status_code == 400 and ujson.loads(r.text)['title'] == 'Member Exists':
                logging.warning("Attempted to sign up already existing email to list: %s" %
                                (data['email_address'],))
            elif r.status_code == 400:
                retry_event('signups', data, lambda e: r.raise_for_status())
            else:
                r.raise_for_status()

@assign_queue('invites')
class ConfirmationEmailWorker(QueueProcessingWorker):
    def consume(self, data: Mapping[str, Any]) -> None:
        if "email" in data:
            # When upgrading from a version up through 1.7.1, there may be
            # existing items in the queue with `email` instead of `prereg_id`.
            invitee = PreregistrationUser.objects.filter(
                email__iexact=data["email"].strip()).latest("invited_at")
        else:
            invitee = PreregistrationUser.objects.filter(id=data["prereg_id"]).first()
            if invitee is None:
                # The invitation could have been revoked
                return

        referrer = get_user_profile_by_id(data["referrer_id"])
        logger.info("Sending invitation for realm %s to %s" % (referrer.realm.string_id, invitee.email))
        do_send_confirmation_email(invitee, referrer)

        # queue invitation reminder for two days from now.
        link = create_confirmation_link(invitee, referrer.realm.host, Confirmation.INVITATION)
        context = common_context(referrer)
        context.update({
            'activate_url': link,
            'referrer_name': referrer.full_name,
            'referrer_email': referrer.delivery_email,
            'referrer_realm_name': referrer.realm.name,
        })
        send_future_email(
            "zerver/emails/invitation_reminder",
            referrer.realm,
            to_emails=[invitee.email],
            from_address=FromAddress.tokenized_no_reply_address(),
            language=referrer.realm.default_language,
            context=context,
            delay=datetime.timedelta(days=2))

@assign_queue('user_activity')
class UserActivityWorker(QueueProcessingWorker):
    def consume(self, event: Mapping[str, Any]) -> None:
        user_profile = get_user_profile_by_id(event["user_profile_id"])
        client = get_client(event["client"])
        log_time = timestamp_to_datetime(event["time"])
        query = event["query"]
        do_update_user_activity(user_profile, client, query, log_time)

@assign_queue('user_activity_interval')
class UserActivityIntervalWorker(QueueProcessingWorker):
    def consume(self, event: Mapping[str, Any]) -> None:
        user_profile = get_user_profile_by_id(event["user_profile_id"])
        log_time = timestamp_to_datetime(event["time"])
        do_update_user_activity_interval(user_profile, log_time)

@assign_queue('user_presence')
class UserPresenceWorker(QueueProcessingWorker):
    def consume(self, event: Mapping[str, Any]) -> None:
        logging.debug("Received presence event: %s" % (event,))
        user_profile = get_user_profile_by_id(event["user_profile_id"])
        client = get_client(event["client"])
        log_time = timestamp_to_datetime(event["time"])
        status = event["status"]
        do_update_user_presence(user_profile, client, log_time, status)

@assign_queue('missedmessage_emails', queue_type="loop")
class MissedMessageWorker(QueueProcessingWorker):
    # Aggregate all messages received over the last BATCH_DURATION
    # seconds to let someone finish sending a batch of messages and/or
    # editing them before they are sent out as emails to recipients.
    #
    # The timer is running whenever there are unsent batched events;
    # we poll at most every TIMER_FREQUENCY seconds, to avoid
    # excessive activity.
    #
    # TODO: Since this process keeps events in memory for up to 2
    # minutes, it will lose approximately BATCH_DURATION worth of
    # missed_message emails whenever it is restarted as part of a
    # server restart. We should probably add some sort of save/reload
    # mechanism for that case.
    TIMER_FREQUENCY = 5
    BATCH_DURATION = 120
    timer_event = None  # type: Optional[Timer]
    events_by_recipient = defaultdict(list)  # type: Dict[int, List[Dict[str, Any]]]
    batch_start_by_recipient = {}  # type: Dict[int, float]

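    # Illustrative timeline, assuming the constants above: a user's first
    # triggering event at t=0s records their batch start; the timer fires
    # every TIMER_FREQUENCY=5 seconds, so maybe_send_batched_emails()
    # first sees the batch's age reach BATCH_DURATION=120 shortly after
    # t=120s, and the batched email goes out then.
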
    def consume(self, event: Dict[str, Any]) -> None:
        logging.debug("Received missedmessage_emails event: %s" % (event,))

        # When we process an event, just put it into the queue and ensure we have a timer going.
        user_profile_id = event['user_profile_id']
        if user_profile_id not in self.batch_start_by_recipient:
            self.batch_start_by_recipient[user_profile_id] = time.time()
        self.events_by_recipient[user_profile_id].append(event)

        self.ensure_timer()

    def ensure_timer(self) -> None:
        if self.timer_event is not None:
            return
        self.timer_event = Timer(self.TIMER_FREQUENCY, MissedMessageWorker.maybe_send_batched_emails, [self])
        self.timer_event.start()

    def stop_timer(self) -> None:
        if self.timer_event and self.timer_event.is_alive():
            self.timer_event.cancel()
            self.timer_event = None

    def maybe_send_batched_emails(self) -> None:
        self.stop_timer()

        current_time = time.time()
        for user_profile_id, timestamp in list(self.batch_start_by_recipient.items()):
            if current_time - timestamp < self.BATCH_DURATION:
                continue
            events = self.events_by_recipient[user_profile_id]
            logging.info("Batch-processing %s missedmessage_emails events for user %s" %
                         (len(events), user_profile_id))
            handle_missedmessage_emails(user_profile_id, events)
            del self.events_by_recipient[user_profile_id]
            del self.batch_start_by_recipient[user_profile_id]

        # By only restarting the timer if there are actually events in
        # the queue, we ensure this queue processor is idle when there
        # are no missed-message emails to process.
        if len(self.batch_start_by_recipient) > 0:
            self.ensure_timer()

@assign_queue('email_senders')
class EmailSendingWorker(QueueProcessingWorker):
    @retry_send_email_failures
    def consume(self, event: Dict[str, Any]) -> None:
        # Copy the event, so that we don't pass the 'failed_tries'
        # data to send_email_from_dict (which neither takes that
        # argument nor needs that data).
        copied_event = copy.deepcopy(event)
        if 'failed_tries' in copied_event:
            del copied_event['failed_tries']
        handle_send_email_format_changes(copied_event)
        send_email_from_dict(copied_event)

@assign_queue('missedmessage_email_senders')
class MissedMessageSendingWorker(EmailSendingWorker):  # nocoverage
    """
    Note: Class decorators are not inherited.

    The `missedmessage_email_senders` queue was used up through 1.7.1, so we
    keep consuming from it in case we've just upgraded from an old version.
    After the 1.8 release, we can delete it and tell admins to upgrade to 1.8
    first.
    """
    # TODO: zulip-1.8: Delete code related to missedmessage_email_senders queue.
    pass

@assign_queue('missedmessage_mobile_notifications')
class PushNotificationsWorker(QueueProcessingWorker):  # nocoverage
    def start(self) -> None:
        # initialize_push_notifications doesn't strictly do anything
        # beyond printing some logging warnings if push notifications
        # are not available in the current configuration.
        initialize_push_notifications()
        super().start()

    def consume(self, data: Mapping[str, Any]) -> None:
        if data.get("type", "add") == "remove":
            message_ids = data.get('message_ids')
            if message_ids is None:  # legacy task across an upgrade
                message_ids = [data['message_id']]
            handle_remove_push_notification(data['user_profile_id'], message_ids)
        else:
            handle_push_notification(data['user_profile_id'], data)

# We probably could stop running this queue worker at all if ENABLE_FEEDBACK is False
@assign_queue('feedback_messages')
class FeedbackBot(QueueProcessingWorker):
    def consume(self, event: Mapping[str, Any]) -> None:
        logging.info("Received feedback from %s" % (event["sender_email"],))
        handle_feedback(event)

@assign_queue('error_reports')
class ErrorReporter(QueueProcessingWorker):
    def consume(self, event: Mapping[str, Any]) -> None:
        logging.info("Processing traceback with type %s for %s" % (event['type'], event.get('user_email')))
        if settings.ERROR_REPORTING:
            do_report_error(event['report']['host'], event['type'], event['report'])

@assign_queue('slow_queries', queue_type="loop")
class SlowQueryWorker(LoopQueueProcessingWorker):
    # Sleep 1 minute between checking the queue
    sleep_delay = 60 * 1

    def consume_batch(self, slow_query_events: List[Dict[str, Any]]) -> None:
        for event in slow_query_events:
            logging.info("Slow query: %s" % (event["query"],))

        if settings.SLOW_QUERY_LOGS_STREAM is None:
            return

        if settings.ERROR_BOT is None:
            return

        if len(slow_query_events) > 0:
            topic = "%s: slow queries" % (settings.EXTERNAL_HOST,)

            content = ""
            for event in slow_query_events:
                content += "    %s\n" % (event["query"],)

            error_bot_realm = get_system_bot(settings.ERROR_BOT).realm
            internal_send_message(error_bot_realm, settings.ERROR_BOT,
                                  "stream", settings.SLOW_QUERY_LOGS_STREAM, topic, content)

@assign_queue("message_sender")
class MessageSenderWorker(QueueProcessingWorker):
    def __init__(self) -> None:
        super().__init__()
        self.redis_client = get_redis_client()
        self.handler = BaseHandler()
        self.handler.load_middleware()

    def consume(self, event: Mapping[str, Any]) -> None:
        server_meta = event['server_meta']

        environ = {
            'REQUEST_METHOD': 'SOCKET',
            'SCRIPT_NAME': '',
            'PATH_INFO': '/json/messages',
            'SERVER_NAME': '127.0.0.1',
            'SERVER_PORT': 9993,
            'SERVER_PROTOCOL': 'ZULIP_SOCKET/1.0',
            'wsgi.version': (1, 0),
            'wsgi.input': StringIO(),
            'wsgi.errors': sys.stderr,
            'wsgi.multithread': False,
            'wsgi.multiprocess': True,
            'wsgi.run_once': False,
            'zulip.emulated_method': 'POST'
        }

        if 'socket_user_agent' in event['request']:
            environ['HTTP_USER_AGENT'] = event['request']['socket_user_agent']
            del event['request']['socket_user_agent']

        # We're mostly using a WSGIRequest for convenience
        environ.update(server_meta['request_environ'])
        request = WSGIRequest(environ)
        # Note: If we ever support non-POST methods, we'll need to change this.
        request._post = event['request']
        request.csrf_processing_done = True

        user_profile = get_user_profile_by_id(server_meta['user_id'])
        request._cached_user = user_profile

        resp = self.handler.get_response(request)
        server_meta['time_request_finished'] = time.time()
        server_meta['worker_log_data'] = request._log_data

        resp_content = resp.content.decode('utf-8')
        response_data = ujson.loads(resp_content)
        if response_data['result'] == 'error':
            check_and_send_restart_signal()

        result = {'response': response_data, 'req_id': event['req_id'],
                  'server_meta': server_meta}

        redis_key = req_redis_key(event['req_id'])
        self.redis_client.hmset(redis_key, {'status': 'complete',
                                            'response': resp_content})

        queue_json_publish(server_meta['return_queue'], result,
                           respond_send_message)

@assign_queue('digest_emails')
class DigestWorker(QueueProcessingWorker):  # nocoverage
    # Who gets a digest is entirely determined by the enqueue_digest_emails
    # management command, not here.
    def consume(self, event: Mapping[str, Any]) -> None:
        logging.info("Received digest event: %s" % (event,))
        handle_digest_email(event["user_profile_id"], event["cutoff"])

@assign_queue('email_mirror')
class MirrorWorker(QueueProcessingWorker):
    def consume(self, event: Mapping[str, Any]) -> None:
        rcpt_to = event['rcpt_to']
        if not is_missed_message_address(rcpt_to):
            # Missed message addresses are one-time use, so we don't need
            # to worry about emails to them resulting in message spam.
            recipient_realm = extract_and_validate(rcpt_to)[0].realm
            try:
                rate_limit_mirror_by_realm(recipient_realm)
            except RateLimited:
                msg = email.message_from_string(event["message"])
                logger.warning("MirrorWorker: Rejecting an email from: %s "
                               "to realm: %s - rate limited."
                               % (msg['From'], recipient_realm.name))
                return

        mirror_email(email.message_from_string(event["message"]),
                     rcpt_to=rcpt_to, pre_checked=True)

@assign_queue('test', queue_type="test")
class TestWorker(QueueProcessingWorker):
    # This worker allows you to test the queue worker infrastructure without
    # creating significant side effects.  It can be useful in development or
    # for troubleshooting prod/staging.  It pulls a message off the test queue
    # and appends it to a file in /tmp.
    def consume(self, event: Mapping[str, Any]) -> None:  # nocoverage
        fn = settings.ZULIP_WORKER_TEST_FILE
        message = ujson.dumps(event)
        logging.info("TestWorker should append this message to %s: %s" % (fn, message))
        with open(fn, 'a') as f:
            f.write(message + '\n')

@assign_queue('embed_links')
class FetchLinksEmbedData(QueueProcessingWorker):
    def consume(self, event: Mapping[str, Any]) -> None:
        for url in event['urls']:
            url_preview.get_link_embed_data(url)

        message = Message.objects.get(id=event['message_id'])
        # If the message changed, we will run this task after updating the message
        # in zerver.views.messages.update_message_backend
        if message.content != event['message_content']:
            return
        if message.content is not None:
            query = UserMessage.objects.filter(
                message=message.id
            )
            message_user_ids = set(query.values_list('user_profile_id', flat=True))

            # Fetch the realm whose settings we're using for rendering
            realm = Realm.objects.get(id=event['message_realm_id'])

            # If rendering fails, the called code will raise a JsonableError.
            rendered_content = render_incoming_message(
                message,
                message.content,
                message_user_ids,
                realm)
            do_update_embedded_data(
                message.sender, message, message.content, rendered_content)

@assign_queue('outgoing_webhooks')
class OutgoingWebhookWorker(QueueProcessingWorker):
    def consume(self, event: Mapping[str, Any]) -> None:
        message = event['message']
        dup_event = cast(Dict[str, Any], event)
        dup_event['command'] = message['content']

        services = get_bot_services(event['user_profile_id'])
        for service in services:
            dup_event['service_name'] = str(service.name)
            service_handler = get_outgoing_webhook_service_handler(service)
            request_data = service_handler.build_bot_request(dup_event)
            if request_data:
                do_rest_call(service.base_url,
                             request_data,
                             dup_event,
                             service_handler)

@assign_queue('embedded_bots')
class EmbeddedBotWorker(QueueProcessingWorker):

    def get_bot_api_client(self, user_profile: UserProfile) -> EmbeddedBotHandler:
        return EmbeddedBotHandler(user_profile)

    def consume(self, event: Mapping[str, Any]) -> None:
        user_profile_id = event['user_profile_id']
        user_profile = get_user_profile_by_id(user_profile_id)

        message = cast(Dict[str, Any], event['message'])

        # TODO: Do we actually want to allow multiple Services per bot user?
        services = get_bot_services(user_profile_id)
        for service in services:
            bot_handler = get_bot_handler(str(service.name))
            if bot_handler is None:
                logging.error("Error: User %s has bot with invalid embedded bot service %s" % (
                    user_profile_id, service.name))
                continue
            try:
                if hasattr(bot_handler, 'initialize'):
                    bot_handler.initialize(self.get_bot_api_client(user_profile))
                if event['trigger'] == 'mention':
                    message['content'] = extract_query_without_mention(
                        message=message,
                        client=cast(ExternalBotHandler, self.get_bot_api_client(user_profile)),
                    )
                    assert message['content'] is not None
                bot_handler.handle_message(
                    message=message,
                    bot_handler=self.get_bot_api_client(user_profile)
                )
            except EmbeddedBotQuitException as e:
                logging.warning(str(e))

@assign_queue('deferred_work')
class DeferredWorker(QueueProcessingWorker):
    def consume(self, event: Mapping[str, Any]) -> None:
        if event['type'] == 'mark_stream_messages_as_read':
            user_profile = get_user_profile_by_id(event['user_profile_id'])
            client = Client.objects.get(id=event['client_id'])

            for stream_id in event['stream_ids']:
                # Since the user just unsubscribed, we don't require
                # an active Subscription object (otherwise, private
                # streams would never be accessible)
                (stream, recipient, sub) = access_stream_by_id(user_profile, stream_id,
                                                               require_active=False)
                do_mark_stream_messages_as_read(user_profile, client, stream)
        elif event['type'] == 'realm_export':
            start = time.time()
            realm = Realm.objects.get(id=event['realm_id'])
            output_dir = tempfile.mkdtemp(prefix="zulip-export-")

            public_url = export_realm_wrapper(realm=realm, output_dir=output_dir,
                                              threads=6, upload=True, public_only=True,
                                              delete_after_upload=True)
            assert public_url is not None

            # Update the extra_data field now that the export is complete.
            export_event = RealmAuditLog.objects.get(id=event['id'])
            export_event.extra_data = ujson.dumps(dict(
                export_path=urllib.parse.urlparse(public_url).path,
            ))
            export_event.save(update_fields=['extra_data'])

            # Send a private message notification letting the user who
            # triggered the export know the export finished.
            user_profile = get_user_profile_by_id(event['user_profile_id'])
            content = "Your data export is complete and has been uploaded here:\n\n%s" % (
                public_url,)
            internal_send_private_message(
                realm=user_profile.realm,
                sender=get_system_bot(settings.NOTIFICATION_BOT),
                recipient_user=user_profile,
                content=content
            )

            # For future frontend use, also notify administrator
            # clients that the export happened.
            notify_realm_export(user_profile)
            logging.info("Completed data export for %s in %s" % (
                user_profile.realm.string_id, time.time() - start))