mirror of https://github.com/zulip/zulip.git
Update memcache -> remote cache in inline documentation.
This commit is contained in:
parent beac606ce6
commit 2fe0700f55
@@ -79,13 +79,13 @@ In our Django code, never do direct
 use ``get_user_profile_by_{email,id}``. There are 3 reasons for this:
 
 #. It's guaranteed to correctly do a case-inexact lookup
-#. It fetches the user object from memcached, which is faster
+#. It fetches the user object from remote cache, which is faster
 #. It always fetches a UserProfile object which has been queried using
    .selected\_related(), and thus will perform well when one later
    accesses related models like the Realm.
 
 Similarly we have ``get_client`` and ``get_stream`` functions to fetch
-those commonly accessed objects via memcached.
+those commonly accessed objects via remote cache.
 
 Using Django model objects as keys in sets/dicts
 ------------------------------------------------
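The doc text above describes a cache-backed lookup; the sketch below shows roughly what such a helper does, using Django's generic cache API. The key format, timeout, and import path are assumptions for illustration, not Zulip's actual implementation.

from django.core.cache import cache
from zerver.models import UserProfile  # assumed import path

def get_user_profile_by_email(email):
    # Case-inexact: normalize the address before using it as a cache key.
    key = "user_profile_by_email:%s" % (email.strip().lower(),)  # assumed key format
    user_profile = cache.get(key)
    if user_profile is None:
        # select_related() pulls in commonly accessed related models
        # (e.g. Realm) in the same query, so later attribute access
        # does not trigger extra database hits.
        user_profile = UserProfile.objects.select_related().get(
            email__iexact=email.strip())
        cache.set(key, user_profile, 3600 * 24 * 7)  # assumed timeout
    return user_profile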
@@ -33,11 +33,11 @@
 # every 5 seconds (see local.js), so aggregating over a longer period of time
 # will inflate the output value
 
-# Aggregate all per-bucket memcached stats into a generic hit/miss stat
+# Aggregate all per-bucket remote cache stats into a generic hit/miss stat
 stats.<app>.cache.all.hit (5) = sum stats.<app>.cache.*.hit
 stats.<app>.cache.all.miss (5) = sum stats.<app>.cache.*.miss
 
-# Aggregate all per-bucket memcached stats counts into a generic hit/miss stat
+# Aggregate all per-bucket remote cache stats counts into a generic hit/miss stat
 stats_counts.<app>.cache.all.hit (5) = sum stats_counts.<app>.cache.*.hit
 stats_counts.<app>.cache.all.miss (5) = sum stats_counts.<app>.cache.*.miss
 
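These two rules are carbon-aggregator configuration: every 5 seconds, all per-bucket hit/miss counters are summed into a single "all" metric. A rough Python sketch of the equivalent computation (metric names are illustrative):

# Illustrative only: what one 5-second aggregation window computes.
per_bucket_hits = {
    "stats.app.cache.user_profile.hit": 40,
    "stats.app.cache.stream.hit": 12,
    "stats.app.cache.message.hit": 97,
}

# stats.<app>.cache.all.hit = sum stats.<app>.cache.*.hit
all_hit = sum(per_bucket_hits.values())
print("stats.app.cache.all.hit =", all_hit)  # -> 149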
@@ -436,7 +436,7 @@ def do_deactivate_stream(stream, log=True):
     stream.name = new_name[:Stream.MAX_NAME_LENGTH]
     stream.save()
 
-    # Remove the old stream information from memcached.
+    # Remove the old stream information from remote cache.
     old_cache_key = get_stream_cache_key(old_name, stream.realm)
     cache_delete(old_cache_key)
 
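The hunk above is the usual invalidation step on a rename: compute the cache key for the old name and delete it, so the next lookup repopulates the cache under the new name. A minimal sketch of that idea with Django's cache API; the key format here is an assumption, not the real get_stream_cache_key():

from django.core.cache import cache

def stream_cache_key(stream_name, realm):
    # Assumed key format; the real helper may differ.
    return "stream:%s:%s" % (realm.id, stream_name.lower())

def invalidate_renamed_stream(stream, old_name):
    # Drop the entry stored under the old name so stale data cannot be
    # served; the next lookup re-caches the stream under its new name.
    cache.delete(stream_cache_key(old_name, stream.realm))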
@@ -609,7 +609,7 @@ def do_send_messages(messages):
     for message in messages:
         cache_save_message(message['message'])
         # Render Markdown etc. here and store (automatically) in
-        # memcached, so that the single-threaded Tornado server
+        # remote cache, so that the single-threaded Tornado server
         # doesn't have to.
         user_flags = user_message_flags.get(message['message'].id, {})
         sender = message['message'].sender
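The comment's point is architectural: the expensive Markdown rendering happens once in the Django sending path and the result is stored, so the single-threaded Tornado server only reads finished message dicts. A rough sketch of that shape (helper and key names are assumptions, not Zulip's cache_save_message):

from django.core.cache import cache

def save_rendered_message_to_cache(message, render_markdown):
    # Render once in the Django worker process...
    rendered_content = render_markdown(message.content)
    # ...and store the finished dict so Tornado can serve it without
    # doing any rendering of its own.
    cache.set("message_dict:%d" % (message.id,), {  # assumed key format
        "id": message.id,
        "content": rendered_content,
    })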
@@ -2244,7 +2244,7 @@ def do_update_message(user_profile, message_id, subject, propagate_mode, content
 
         for m in messages_list:
             # The cached ORM object is not changed by messages.update()
-            # and the memcached update requires the new value
+            # and the remote cache update requires the new value
             m.subject = subject
 
         changed_messages += messages_list
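The comment above leans on a Django subtlety worth spelling out: QuerySet.update() issues a single UPDATE in SQL and never mutates Python instances that were already fetched, so any copies destined for the cache must be patched by hand. A small illustration, with the model import path assumed:

from zerver.models import Message  # assumed import path

def update_subjects(changed_ids, new_subject):
    messages = Message.objects.filter(id__in=changed_ids)
    messages_list = list(messages)        # ORM objects now held in memory

    messages.update(subject=new_subject)  # one UPDATE statement in SQL

    # The instances above still carry the old subject, because .update()
    # bypasses them entirely; fix them up before re-serializing into the
    # cache so the cached dicts reflect the new value.
    for m in messages_list:
        m.subject = new_subject
    return messages_list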
@@ -41,7 +41,8 @@ Usage: python2.7 manage.py create_realm --domain=foo.com --name='Foo, Inc.'"""
 
     def validate_domain(self, domain):
         # Domains can't contain whitespace if they are to be used in memcached
-        # keys.
+        # keys. Seems safer to leave that as the default case regardless of
+        # which backing store we use.
         if re.search("\s", domain):
             raise ValueError("Domains can't contain whitespace")
 
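The reason for the whitespace rule is the memcached text protocol itself: keys may not contain whitespace or control characters, so a domain that ends up embedded in a key has to be clean. A small sketch of the kind of key construction this validation protects (the key format is illustrative):

import re

def realm_cache_key(domain):
    # A domain with whitespace would produce an invalid memcached key,
    # which is why validate_domain() rejects it up front.
    if re.search(r"\s", domain):
        raise ValueError("Domains can't contain whitespace")
    return "realm:%s" % (domain,)  # illustrative key format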
@@ -1087,7 +1087,7 @@ class GetOldMessagesTest(AuthedTestCase):
         m = Message.objects.all().order_by('-id')[0]
         m.rendered_content = m.rendered_content_version = None
         m.content = 'test content'
-        # Use to_dict_uncached directly to avoid having to deal with memcached
+        # Use to_dict_uncached directly to avoid having to deal with remote cache
         d = m.to_dict_uncached(True)
         self.assertEqual(d['content'], '<p>test content</p>')
 
@@ -548,7 +548,7 @@ def get_old_messages_backend(request, user_profile,
     # 'user_messages' dictionary maps each message to the user's
     # UserMessage object for that message, which we will attach to the
     # rendered message dict before returning it. We attempt to
-    # bulk-fetch rendered message dicts from memcached using the
+    # bulk-fetch rendered message dicts from remote cache using the
     # 'messages' list.
     search_fields = dict()
     message_ids = []
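Bulk fetching here normally means one get_many() round trip for every candidate message, with only the misses falling back to the database. A minimal sketch of that pattern with Django's cache API; the key format and the fallback helper are assumptions:

from django.core.cache import cache

def bulk_fetch_message_dicts(message_ids, load_and_cache_message_dict):
    keys = ["message_dict:%d" % (mid,) for mid in message_ids]  # assumed key format
    cached = cache.get_many(keys)  # one round trip for all the hits

    result = {}
    for mid, key in zip(message_ids, keys):
        if key in cached:
            result[mid] = cached[key]
        else:
            # Only the misses touch the database (and get re-cached there).
            result[mid] = load_and_cache_message_dict(mid)
    return result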