Call reset_queries in our queue workers.

This fixes a small memory leak in our queue workers, where we don't
reset the accumulated times contained in our query logging data.

Longer-term, we may want to make something mergeable for mainline where
we store only the totals on the connection object; that would be a
fixed amount of memory per connection and thus not have this problem.

(imported from commit 914fa13acfb576f73c5f35e0f64c2f4d8a56b111)
This commit is contained in:
Tim Abbott 2013-11-19 14:32:20 -05:00
parent 837b066f49
commit 9ba820703a
1 changed file with 5 additions and 0 deletions

View File

@ -17,6 +17,7 @@ from zerver.lib.digest import handle_digest_email
from zerver.decorator import JsonableError
from zerver.lib.socket import req_redis_key
from confirmation.models import Confirmation
from django.db import reset_queries
import os
import sys
@ -65,6 +66,7 @@ class QueueProcessingWorker(object):
with lockfile(lock_fn):
with open(fn, 'a') as f:
f.write(line)
reset_queries()
def _log_problem(self):
logging.exception("Problem handling data on queue %s" % (self.queue_name,))
@ -193,6 +195,7 @@ class MissedMessageWorker(QueueProcessingWorker):
for user_profile_id, events in by_recipient.items():
handle_missedmessage_emails(user_profile_id, events)
reset_queries()
# Aggregate all messages received every 2 minutes to let someone finish sending a batch
# of messages
time.sleep(2 * 60)
@ -259,6 +262,8 @@ class SlowQueryWorker(QueueProcessingWorker):
internal_send_message(settings.ERROR_BOT, "stream", "logs", topic, content)
reset_queries()
@assign_queue("message_sender")
class MessageSenderWorker(QueueProcessingWorker):
def __init__(self):