logging: Fix request processing time to not count initialization.

Previously, we counted not just the time required to process a
particular request, but also the time required to import and find the
view function via urls.py. The latter is usually fast, but when a new
Django thread starts up it can take significant time, which caused us
to flag slow requests both on every server restart and whenever a new
Django thread started up on prod to handle requests.

With this change, we no longer include that startup time as part of
request processing time -- but we still log it whenever it exceeds
5ms, so that we can identify why a particular request was slower than
expected if high startup times become a problem. We annotate the time
in log lines as "+start" rather than just "start" to make clear that
it is not counted in the total.

(imported from commit c677682e23b26005060390d85d386234f4f463ad)
commit 26f7b783a2
parent d44c6636c6

@@ -104,6 +104,10 @@ def write_log_line(log_data, path, method, remote_ip, email, client_name,
             statsd.timing("%s.memcached.time" % (statsd_path,), timedelta_ms(memcached_time_delta))
             statsd.incr("%s.memcached.querycount" % (statsd_path,), memcached_count_delta)
 
+    startup_output = ""
+    if 'startup_time_delta' in log_data and log_data["startup_time_delta"] > 0.005:
+        startup_output = " (+start: %s)" % (format_timedelta(log_data["startup_time_delta"]))
+
     bugdown_output = ""
     if 'bugdown_time_start' in log_data:
         bugdown_time_delta = get_bugdown_time() - log_data['bugdown_time_start']
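The effect of the new block is easiest to see in isolation: the
annotation only appears when startup took more than 5ms, and it is
rendered through the same format_timedelta helper as the other
timings. A minimal runnable sketch, with a hypothetical
format_timedelta standing in for the real helper (whose exact output
format may differ):

    # Hypothetical stand-in for the format_timedelta helper used in the
    # diff; the real helper's exact output format may differ.
    def format_timedelta(timedelta_seconds):
        # Render sub-second times in milliseconds, longer times in seconds.
        if timedelta_seconds >= 1:
            return "%.1fs" % (timedelta_seconds,)
        return "%.0fms" % (timedelta_seconds * 1000,)

    log_data = {"startup_time_delta": 0.250}

    # Mirrors the added block: annotate only when startup exceeded 5ms.
    startup_output = ""
    if 'startup_time_delta' in log_data and log_data["startup_time_delta"] > 0.005:
        startup_output = " (+start: %s)" % (format_timedelta(log_data["startup_time_delta"]))

    print(startup_output)  # " (+start: 250ms)"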
@@ -141,10 +145,10 @@ def write_log_line(log_data, path, method, remote_ip, email, client_name,
     else:
         extra_request_data = ""
     logger_client = "(%s via %s)" % (email, client_name)
-    logger_timing = '%5s%s%s%s%s %s' % \
+    logger_timing = '%5s%s%s%s%s%s %s' % \
                      (format_timedelta(time_delta), optional_orig_delta,
                       memcached_output, bugdown_output,
-                      db_time_output, path)
+                      db_time_output, startup_output, path)
     logger_line = '%-15s %-7s %3d %s%s %s' % \
                     (remote_ip, method, status_code,
                      logger_timing, extra_request_data, logger_client)
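With the extra %s slot in place, the assembled timing string carries
the startup annotation alongside the other components. A sketch with
hypothetical values (the memcached/db fragment formats below are
illustrative, not the real ones):

    # Hypothetical values illustrating how logger_timing is assembled;
    # optional_orig_delta and the *_output fragments are empty or
    # pre-formatted strings in the real code.
    def format_timedelta(t):
        # Hypothetical stand-in matching the "42ms" style seen above.
        return "%.0fms" % (t * 1000,)

    time_delta = 0.042
    optional_orig_delta = ""
    memcached_output = " (mem: 3ms/7)"
    bugdown_output = ""
    db_time_output = " (db: 9ms/2q)"
    startup_output = " (+start: 250ms)"
    path = "/json/get_updates"

    logger_timing = '%5s%s%s%s%s%s %s' % \
                     (format_timedelta(time_delta), optional_orig_delta,
                      memcached_output, bugdown_output,
                      db_time_output, startup_output, path)
    print(logger_timing)
    #  42ms (mem: 3ms/7) (db: 9ms/2q) (+start: 250ms) /json/get_updates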
@@ -165,11 +169,25 @@ def write_log_line(log_data, path, method, remote_ip, email, client_name,
         logger.info('status=%3d, data=%s, uid=%s' % (status_code, error_content, email))
 
 class LogRequests(object):
+    # We primarily are doing logging using the process_view hook, but
+    # for some views, process_view isn't run, so we call the start
+    # method here too
     def process_request(self, request):
         request._log_data = dict()
         record_request_start_data(request._log_data)
         connection.queries = []
 
+    def process_view(self, request, view_func, args, kwargs):
+        # process_request was already run; we save the initialization
+        # time (i.e. the time between receiving the request and
+        # figuring out which view function to call, which is primarily
+        # importing modules on the first start)
+        request._log_data["startup_time_delta"] = time.time() - request._log_data["time_started"]
+        # And then completely reset our tracking to only cover work
+        # done as part of this request
+        record_request_start_data(request._log_data)
+        connection.queries = []
+
     def process_response(self, request, response):
         # The reverse proxy might have sent us the real external IP
         remote_ip = request.META.get('HTTP_X_REAL_IP')
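The reason this split works is the ordering of Django's old-style
middleware hooks: process_request runs before URL resolution (which is
what triggers view-function imports on first use), while process_view
runs only once the view function has been resolved. A self-contained
sketch of the same timing pattern, independent of the Zulip helpers
above (the hook names and ordering match Django's middleware API;
everything else, including the attribute names, is illustrative):

    import time

    class TimingMiddleware(object):
        # A stripped-down sketch of the pattern used in this commit.
        def process_request(self, request):
            # Runs before URL resolution, so this timestamp precedes any
            # module-import cost incurred while resolving the view.
            request._start = time.time()

        def process_view(self, request, view_func, args, kwargs):
            # Runs once the view function is known: the elapsed time so
            # far is startup/initialization, not request processing.
            request._startup_delta = time.time() - request._start
            # Reset the clock so the total only covers the view itself.
            request._start = time.time()

        def process_response(self, request, response):
            total = time.time() - getattr(request, '_start', time.time())
            startup = getattr(request, '_startup_delta', 0)
            if startup > 0.005:
                print("%.0fms (+start: %.0fms)" % (total * 1000, startup * 1000))
            else:
                print("%.0fms" % (total * 1000,))
            return response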