# See https://zulip.readthedocs.io/en/latest/subsystems/caching.html for docs
from typing import Any, Callable, Dict, List, Tuple
import datetime
import logging
# This file needs to be different from cache.py because cache.py
# cannot import anything from zerver.models or we'd have an import
# loop
from analytics.models import RealmCount
from django.conf import settings
from zerver.models import Message, UserProfile, Stream, get_stream_cache_key, \
    Client, get_client_cache_key, \
    Huddle, huddle_hash_cache_key
from zerver.lib.cache import \
    user_profile_by_api_key_cache_key, \
    user_profile_cache_key, get_remote_cache_time, get_remote_cache_requests, \
    cache_set_many, to_dict_cache_key_id
from zerver.lib.message import MessageDict
from zerver.lib.users import get_all_api_keys
from importlib import import_module
from django.contrib.sessions.models import Session
from django.db.models import Q
from django.utils.timezone import now as timezone_now
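# Size of the window of recent messages that message_fetch_objects()
# loads; the message cache filler itself is currently disabled (see
# cache_fillers below).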
MESSAGE_CACHE_SIZE = 75000
def message_fetch_objects() -> List[Any]:
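    # Fetch the newest message id first, then pull the window with a plain
    # `id > max_id - MESSAGE_CACHE_SIZE` filter.  Per the original commit
    # message, this lets the Postgres planner use an index range scan
    # instead of a huge ORDER BY ... LIMIT plan (~300ms vs. ~1100ms in
    # testing), at the cost of not getting exactly MESSAGE_CACHE_SIZE
    # messages when there are holes in the id sequence.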
    try:
        max_id = Message.objects.only('id').order_by("-id")[0].id
    except IndexError:
        return []
    return Message.objects.select_related().filter(~Q(sender__email='tabbott/extra@mit.edu'),
                                                    id__gt=max_id - MESSAGE_CACHE_SIZE)
def message_cache_items(items_for_remote_cache: Dict[str, Tuple[bytes]],
                        message: Message) -> None:
    '''
    Note: this code is untested, and the caller has been
    commented out for a while.
    '''
    key = to_dict_cache_key_id(message.id)
    value = MessageDict.to_dict_uncached(message)
    items_for_remote_cache[key] = (value,)
def user_cache_items(items_for_remote_cache: Dict[str, Tuple[UserProfile]],
                     user_profile: UserProfile) -> None:
    for api_key in get_all_api_keys(user_profile):
        items_for_remote_cache[user_profile_by_api_key_cache_key(api_key)] = (user_profile,)
    items_for_remote_cache[user_profile_cache_key(user_profile.email,
                                                  user_profile.realm)] = (user_profile,)
    # We have other user_profile caches, but none of them are on the
    # core serving path for lots of requests.
def stream_cache_items(items_for_remote_cache: Dict[str, Tuple[Stream]],
                       stream: Stream) -> None:
    items_for_remote_cache[get_stream_cache_key(stream.name, stream.realm_id)] = (stream,)
def client_cache_items(items_for_remote_cache: Dict[str, Tuple[Client]],
                       client: Client) -> None:
    items_for_remote_cache[get_client_cache_key(client.name)] = (client,)
def huddle_cache_items(items_for_remote_cache: Dict[str, Tuple[Huddle]],
                       huddle: Huddle) -> None:
    items_for_remote_cache[huddle_hash_cache_key(huddle.huddle_hash)] = (huddle,)
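# Load whichever session engine the server is configured to use;
# settings.SESSION_ENGINE is a dotted module path such as
# "django.contrib.sessions.backends.cached_db".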
session_engine = import_module(settings.SESSION_ENGINE)
def session_cache_items(items_for_remote_cache: Dict[str, str],
                        session: Session) -> None:
    if settings.SESSION_ENGINE != "django.contrib.sessions.backends.cached_db":
        # If we're not using the cached_db session engine, there will
        # be no store.cache_key attribute, and in any case we don't
        # need to fill the cache, since it won't exist.
        return
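    # Rebuild the backend's own SessionStore so we write exactly the
    # cache key/value pair that the cached_db engine will later read.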
    store = session_engine.SessionStore(session_key=session.session_key)  # type: ignore[attr-defined] # import_module
    items_for_remote_cache[store.cache_key] = store.decode(session.session_data)
def get_active_realm_ids() -> List[int]:
    """For servers like zulipchat.com with a lot of realms, it only makes
    sense to do cache-filling work for realms that have any currently
    active users/clients.  Otherwise, we end up with every single-user
    trial organization that has ever been created costing us N streams
    worth of cache work (where N is the number of default streams for
    a new organization).
    """
    date = timezone_now() - datetime.timedelta(days=2)
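    # "1day_actives::day" is the analytics stat counting daily active
    # users, aggregated per realm in RealmCount.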
    return RealmCount.objects.filter(
        end_time__gte=date,
        property="1day_actives::day",
        value__gt=0).distinct("realm_id").values_list("realm_id", flat=True)
def get_streams() -> List[Stream]:
    return Stream.objects.select_related().filter(
        realm__in=get_active_realm_ids()).exclude(
            # We filter out Zephyr realms, because they can easily
            # have 10,000s of streams with only 1 subscriber.
            is_in_zephyr_realm=True)
def get_users() -> List[UserProfile]:
    return UserProfile.objects.select_related().filter(
        long_term_idle=False,
        realm__in=get_active_realm_ids())
# Format is (objects query, items filler function, timeout, batch size)
#
# The objects queries are put inside lambdas to prevent Django from
# doing any setup for things we're unlikely to use (without the lambda
# wrapper the below adds an extra 3ms or so to startup time for
# anything importing this file).
cache_fillers: Dict[str, Tuple[Callable[[], List[Any]], Callable[[Dict[str, Any], Any], None], int, int]] = {
    'user': (get_users, user_cache_items, 3600*24*7, 10000),
    'client': (lambda: Client.objects.select_related().all(), client_cache_items, 3600*24*7, 10000),
    'stream': (get_streams, stream_cache_items, 3600*24*7, 10000),
    # Message cache fetching disabled until we can fix the fact that it
    # does a bunch of inefficient memcached queries as part of filling
    # the display_recipient cache
    #   'message': (message_fetch_objects, message_cache_items, 3600 * 24, 1000),
    'huddle': (lambda: Huddle.objects.select_related().all(), huddle_cache_items, 3600*24*7, 10000),
    'session': (lambda: Session.objects.all(), session_cache_items, 3600*24*7, 10000),
}
def fill_remote_cache(cache: str) -> None:
    remote_cache_time_start = get_remote_cache_time()
    remote_cache_requests_start = get_remote_cache_requests()
    items_for_remote_cache: Dict[str, Any] = {}
    (objects, items_filler, timeout, batch_size) = cache_fillers[cache]
    count = 0
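    # Fill in batches of batch_size, flushing to the remote cache as we
    # go; intermediate batches get a shorter (1 day) timeout than the
    # final write (7 days).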
    for obj in objects():
        items_filler(items_for_remote_cache, obj)
        count += 1
        if count % batch_size == 0:
            cache_set_many(items_for_remote_cache, timeout=3600*24)
            items_for_remote_cache = {}
    cache_set_many(items_for_remote_cache, timeout=3600*24*7)
    logging.info("Successfully populated %s cache! Consumed %s remote cache queries (%s time)",
                 cache, get_remote_cache_requests() - remote_cache_requests_start,
                 round(get_remote_cache_time() - remote_cache_time_start, 2))
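
# Minimal usage sketch (hypothetical driver loop; in practice this module
# is driven from a management command):
#
#     for cache_name in cache_fillers:
#         fill_remote_cache(cache_name)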