import logging
import random
import threading
import time
from collections import defaultdict
from typing import Any, Callable, Dict, List, Mapping, Optional, Set

import orjson
import pika
import pika.adapters.tornado_connection
from django.conf import settings
from pika.adapters.blocking_connection import BlockingChannel
from pika.spec import Basic
from tornado import ioloop

from zerver.lib.utils import statsd

MAX_REQUEST_RETRIES = 3

Consumer = Callable[[BlockingChannel, Basic.Deliver, pika.BasicProperties, bytes], None]

# This simple queuing library doesn't expose much of the power of
# rabbitmq/pika's queuing system; its purpose is to just provide an
# interface for external files to put things into queues and take them
# out from bots without having to import pika code all over our codebase.
class SimpleQueueClient:
    def __init__(self,
                 # Disable RabbitMQ heartbeats by default because BlockingConnection can't process them
                 rabbitmq_heartbeat: Optional[int] = 0,
                 ) -> None:
        self.log = logging.getLogger('zulip.queue')
        self.queues: Set[str] = set()
        self.channel: Optional[BlockingChannel] = None
        self.consumers: Dict[str, Set[Consumer]] = defaultdict(set)
        self.rabbitmq_heartbeat = rabbitmq_heartbeat
        self.is_consuming = False
        self._connect()

    def _connect(self) -> None:
        start = time.time()
        self.connection = pika.BlockingConnection(self._get_parameters())
        self.channel = self.connection.channel()
        self.log.info(f'SimpleQueueClient connected (connecting took {time.time() - start:.3f}s)')

    def _reconnect(self) -> None:
        self.connection = None
        self.channel = None
        self.queues = set()
        self._connect()

    def _get_parameters(self) -> pika.ConnectionParameters:
        credentials = pika.PlainCredentials(settings.RABBITMQ_USERNAME,
                                            settings.RABBITMQ_PASSWORD)

        # With BlockingConnection, we are passed
        # self.rabbitmq_heartbeat=0, which asks to explicitly disable
        # the RabbitMQ heartbeat feature. This is correct since that
        # heartbeat doesn't make sense with BlockingConnection (we do
        # need it for TornadoConnection).
        #
        # Where we've disabled RabbitMQ's heartbeat, the only
        # keepalive on this connection is the TCP keepalive (defaults:
        # `/proc/sys/net/ipv4/tcp_keepalive_*`). On most Linux
        # systems, the default is to start sending keepalive packets
        # after TCP_KEEPIDLE (7200 seconds) of inactivity; after that
        # point, it sends them every TCP_KEEPINTVL (typically 75s).
        # Some Kubernetes / Docker Swarm networks can kill "idle" TCP
        # connections after as little as ~15 minutes of inactivity.
        # To avoid this killing our RabbitMQ connections, we set
        # TCP_KEEPIDLE to something significantly below 15 minutes.
        tcp_options = None
        if self.rabbitmq_heartbeat == 0:
            tcp_options = dict(TCP_KEEPIDLE=60 * 5)

        return pika.ConnectionParameters(settings.RABBITMQ_HOST,
                                         heartbeat=self.rabbitmq_heartbeat,
                                         tcp_options=tcp_options,
                                         credentials=credentials)

    def _generate_ctag(self, queue_name: str) -> str:
        return f"{queue_name}_{str(random.getrandbits(16))}"

    def _reconnect_consumer_callback(self, queue: str, consumer: Consumer) -> None:
        self.log.info(f"Queue reconnecting saved consumer {consumer} to queue {queue}")
        self.ensure_queue(
            queue,
            lambda channel: channel.basic_consume(
                queue,
                consumer,
                consumer_tag=self._generate_ctag(queue),
            ),
        )

    def _reconnect_consumer_callbacks(self) -> None:
        for queue, consumers in self.consumers.items():
            for consumer in consumers:
                self._reconnect_consumer_callback(queue, consumer)

    def close(self) -> None:
        if self.connection:
            self.connection.close()

    def ready(self) -> bool:
        return self.channel is not None

    def ensure_queue(self, queue_name: str, callback: Callable[[BlockingChannel], None]) -> None:
        '''Ensure that a given queue has been declared, and then call
           the callback with the channel as its argument.'''
        if self.connection is None or not self.connection.is_open:
            self._connect()

        assert self.channel is not None
        if queue_name not in self.queues:
            self.channel.queue_declare(queue=queue_name, durable=True)
            self.queues.add(queue_name)
        callback(self.channel)

    def publish(self, queue_name: str, body: bytes) -> None:
        def do_publish(channel: BlockingChannel) -> None:
            channel.basic_publish(
                exchange='',
                routing_key=queue_name,
                properties=pika.BasicProperties(delivery_mode=2),
                body=body)

            statsd.incr(f"rabbitmq.publish.{queue_name}")

        self.ensure_queue(queue_name, do_publish)

    def json_publish(self, queue_name: str, body: Mapping[str, Any]) -> None:
        data = orjson.dumps(body)
        try:
            self.publish(queue_name, data)
            return
        except pika.exceptions.AMQPConnectionError:
            self.log.warning("Failed to send to rabbitmq, trying to reconnect and send again")

        self._reconnect()
        self.publish(queue_name, data)

    def start_json_consumer(self,
                            queue_name: str,
                            callback: Callable[[List[Dict[str, Any]]], None],
                            batch_size: int = 1,
                            timeout: Optional[int] = None) -> None:
        if batch_size == 1:
            timeout = None

        def do_consume(channel: BlockingChannel) -> None:
            events: List[Dict[str, Any]] = []
            last_process = time.time()
            max_processed: Optional[int] = None
            self.is_consuming = True

            # This iterator technique will iteratively collect up to
            # batch_size events from the RabbitMQ queue (if present)
            # before calling the callback with the batch. If not
            # enough events are present, it will sleep for at most
            # timeout seconds before calling the callback with the
            # batch of events it has.
            for method, properties, body in channel.consume(queue_name, inactivity_timeout=timeout):
                if body is not None:
                    events.append(orjson.loads(body))
                    max_processed = method.delivery_tag
                now = time.time()
                if len(events) >= batch_size or (timeout and now >= last_process + timeout):
                    if events:
                        try:
                            callback(events)
                            channel.basic_ack(max_processed, multiple=True)
                        except Exception:
                            channel.basic_nack(max_processed, multiple=True)
                            raise
                        events = []
                    last_process = now
                if not self.is_consuming:
                    break

        self.ensure_queue(queue_name, do_consume)
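
    # Note: this measures only the *local* queue in this consumer --
    # messages RabbitMQ has already delivered to us but that we have not
    # yet processed -- not the total depth of the RabbitMQ queue itself.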
    def local_queue_size(self) -> int:
        assert self.channel is not None
        return self.channel.get_waiting_message_count() + len(self.channel._pending_events)

    def stop_consuming(self) -> None:
        assert self.channel is not None
        assert self.is_consuming
        self.is_consuming = False
        self.channel.stop_consuming()
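
# A minimal usage sketch (comments only, not executed; the queue name
# "example_events" is hypothetical): assuming RabbitMQ is reachable with the
# credentials from the Django settings above, a blocking worker might do:
#
#     client = SimpleQueueClient()
#     client.json_publish("example_events", {"type": "ping"})
#     client.start_json_consumer(
#         "example_events",
#         lambda events: print(events),  # called with a list of decoded events
#         batch_size=10,
#         timeout=30,
#     )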

# Patch pika.adapters.tornado_connection.TornadoConnection so that a socket error doesn't
# throw an exception and disconnect the tornado process from the rabbitmq
# queue. Instead, just re-connect as usual
class ExceptionFreeTornadoConnection(pika.adapters.tornado_connection.TornadoConnection):
    def _adapter_disconnect(self) -> None:
        try:
            super()._adapter_disconnect()
        except (pika.exceptions.ProbableAuthenticationError,
                pika.exceptions.ProbableAccessDeniedError,
                pika.exceptions.IncompatibleProtocolError):
            logging.warning("Caught exception in ExceptionFreeTornadoConnection when \
calling _adapter_disconnect, ignoring", exc_info=True)


class TornadoQueueClient(SimpleQueueClient):
    # Based on:
    # https://pika.readthedocs.io/en/0.9.8/examples/asynchronous_consumer_example.html
    def __init__(self) -> None:
        super().__init__(
            # TornadoConnection can process heartbeats, so enable them.
            rabbitmq_heartbeat=None)
        self._on_open_cbs: List[Callable[[BlockingChannel], None]] = []
        self._connection_failure_count = 0

    def _connect(self) -> None:
        self.log.info("Beginning TornadoQueueClient connection")
        self.connection = ExceptionFreeTornadoConnection(
            self._get_parameters(),
            on_open_callback=self._on_open,
            on_open_error_callback=self._on_connection_open_error,
            on_close_callback=self._on_connection_closed,
        )

    def _reconnect(self) -> None:
        self.connection = None
        self.channel = None
        self.queues = set()
        self.log.warning("TornadoQueueClient attempting to reconnect to RabbitMQ")
        self._connect()

    CONNECTION_RETRY_SECS = 2

    # When the RabbitMQ server is restarted, it's normal for it to
    # take a few seconds to come back; we'll retry a few times and all
    # will be well. So for the first few failures, we report only at
    # "warning" level, avoiding an email to the server admin.
    #
    # A loss of an existing connection starts a retry loop just like a
    # failed connection attempt, so it counts as the first failure.
    #
    # On an unloaded test system, a RabbitMQ restart takes about 6s,
    # potentially causing 4 failures. We add some headroom above that.
    CONNECTION_FAILURES_BEFORE_NOTIFY = 10

    def _on_connection_open_error(self, connection: pika.connection.Connection,
                                  reason: Exception) -> None:
        self._connection_failure_count += 1
        retry_secs = self.CONNECTION_RETRY_SECS
        self.log.log(
            logging.CRITICAL
            if self._connection_failure_count > self.CONNECTION_FAILURES_BEFORE_NOTIFY
            else logging.WARNING,
            "TornadoQueueClient couldn't connect to RabbitMQ, retrying in %d secs...",
            retry_secs,
        )
        ioloop.IOLoop.instance().call_later(retry_secs, self._reconnect)

    def _on_connection_closed(self, connection: pika.connection.Connection,
                              reason: Exception) -> None:
        self._connection_failure_count = 1
        retry_secs = self.CONNECTION_RETRY_SECS
        self.log.warning(
            "TornadoQueueClient lost connection to RabbitMQ, reconnecting in %d secs...",
            retry_secs,
        )
        ioloop.IOLoop.instance().call_later(retry_secs, self._reconnect)

    def _on_open(self, connection: pika.connection.Connection) -> None:
        self._connection_failure_count = 0
        try:
            self.connection.channel(
                on_open_callback=self._on_channel_open)
        except pika.exceptions.ConnectionClosed:
            # The connection didn't stay open long enough for this code to get to it.
            # Let _on_connection_closed deal with trying again.
            self.log.warning("TornadoQueueClient couldn't open channel: connection already closed")

    def _on_channel_open(self, channel: BlockingChannel) -> None:
        self.channel = channel
        for callback in self._on_open_cbs:
            callback(channel)
        self._reconnect_consumer_callbacks()
        self.log.info('TornadoQueueClient connected')

    def ensure_queue(self, queue_name: str, callback: Callable[[BlockingChannel], None]) -> None:
        def finish(frame: Any) -> None:
            assert self.channel is not None
            self.queues.add(queue_name)
            callback(self.channel)

        if queue_name not in self.queues:
            # If we're not connected yet, send this message
            # once we have created the channel
            if not self.ready():
                self._on_open_cbs.append(lambda channel: self.ensure_queue(queue_name, callback))
                return

            assert self.channel is not None
            self.channel.queue_declare(queue=queue_name, durable=True, callback=finish)
        else:
            assert self.channel is not None
            callback(self.channel)

    def start_json_consumer(self,
                            queue_name: str,
                            callback: Callable[[List[Dict[str, Any]]], None],
                            batch_size: int = 1,
                            timeout: Optional[int] = None) -> None:
        def wrapped_consumer(ch: BlockingChannel,
                             method: Basic.Deliver,
                             properties: pika.BasicProperties,
                             body: bytes) -> None:
            callback([orjson.loads(body)])
            ch.basic_ack(delivery_tag=method.delivery_tag)

        assert batch_size == 1
        assert timeout is None
        self.consumers[queue_name].add(wrapped_consumer)

        if not self.ready():
            return

        self.ensure_queue(
            queue_name,
            lambda channel: channel.basic_consume(
                queue_name,
                wrapped_consumer,
                consumer_tag=self._generate_ctag(queue_name),
            ),
        )

queue_client: Optional[SimpleQueueClient] = None
def get_queue_client() -> SimpleQueueClient:
    global queue_client
    if queue_client is None:
        if settings.RUNNING_INSIDE_TORNADO and settings.USING_RABBITMQ:
            queue_client = TornadoQueueClient()
        elif settings.USING_RABBITMQ:
            queue_client = SimpleQueueClient()
        else:
            raise RuntimeError("Cannot get a queue client without USING_RABBITMQ")

    return queue_client

# We use a simple lock to prevent multiple RabbitMQ messages being
# sent to the SimpleQueueClient at the same time; this is a workaround
# for an issue with the pika BlockingConnection where using
# BlockingConnection for multiple queues causes the channel to
# randomly close.
queue_lock = threading.RLock()

def queue_json_publish(
    queue_name: str,
    event: Dict[str, Any],
    processor: Optional[Callable[[Any], None]] = None,
) -> None:
    with queue_lock:
        if settings.USING_RABBITMQ:
            get_queue_client().json_publish(queue_name, event)
        elif processor:
            processor(event)
        else:
            # Must be imported here: A top section import leads to circular imports
            from zerver.worker.queue_processors import get_worker
            get_worker(queue_name).consume_single_event(event)

def retry_event(queue_name: str,
                event: Dict[str, Any],
                failure_processor: Callable[[Dict[str, Any]], None]) -> None:
    if 'failed_tries' not in event:
        event['failed_tries'] = 0
    event['failed_tries'] += 1
    if event['failed_tries'] > MAX_REQUEST_RETRIES:
        failure_processor(event)
    else:
        queue_json_publish(queue_name, event, lambda x: None)
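
# A minimal usage sketch (comments only, not executed; the queue name
# "example_events" and the failure handler are hypothetical):
#
#     queue_json_publish("example_events", {"id": 42, "type": "ping"})
#
# A queue processor hitting a transient failure can requeue the event with
# retry_event, which gives up via failure_processor after
# MAX_REQUEST_RETRIES attempts:
#
#     retry_event("example_events", event,
#                 lambda e: logging.error("dropping event after retries: %r", e))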