# System documented in https://zulip.readthedocs.io/en/latest/logging.html

from django.utils.timezone import now as timezone_now
from django.utils.timezone import utc as timezone_utc

import hashlib
import logging
import re
import traceback
from typing import Optional
from datetime import datetime, timedelta
from django.conf import settings
from zerver.lib.str_utils import force_bytes
from logging import Logger

# Adapted from http://djangosnippets.org/snippets/2242/ by user s29 (October 25, 2010)

class _RateLimitFilter:
    """Logging filter that rate-limits how often duplicate error records
    get through.

    The limit is read from a Django setting named after the subclass
    (e.g. ZULIPLIMITER_LIMIT), in seconds, defaulting to 600.  When the
    cache is available, duplicates are detected per traceback; otherwise
    we fall back to a coarse timestamp shared by the filter class.
    """
    last_error = datetime.min.replace(tzinfo=timezone_utc)

    def filter(self, record):
        # type: (logging.LogRecord) -> bool
        from django.conf import settings
        from django.core.cache import cache

        # Track duplicate errors
        duplicate = False
        rate = getattr(settings, '%s_LIMIT' % self.__class__.__name__.upper(),
                       600)  # seconds
        if rate > 0:
            # Test if the cache works
            try:
                cache.set('RLF_TEST_KEY', 1, 1)
                use_cache = cache.get('RLF_TEST_KEY') == 1
            except Exception:
                use_cache = False

            if use_cache:
                if record.exc_info is not None:
                    tb = force_bytes('\n'.join(traceback.format_exception(*record.exc_info)))
                else:
                    tb = force_bytes('%s' % (record,))
                key = self.__class__.__name__.upper() + hashlib.sha1(tb).hexdigest()
                duplicate = cache.get(key) == 1
                if not duplicate:
                    cache.set(key, 1, rate)
            else:
                min_date = timezone_now() - timedelta(seconds=rate)
                duplicate = (self.last_error >= min_date)
                if not duplicate:
                    self.last_error = timezone_now()

        return not duplicate

class ZulipLimiter(_RateLimitFilter):
    pass

class EmailLimiter(_RateLimitFilter):
    pass

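# A minimal sketch (not Zulip's actual settings) of how one of these
# rate-limit filters could be declared in a Django LOGGING dict and
# attached to an error-reporting handler.  The dotted module path,
# handler name, and log file path below are hypothetical; see
# zproject/settings.py for the real configuration.
#
#     LOGGING = {
#         'version': 1,
#         'filters': {
#             'ZulipLimiter': {
#                 '()': 'path.to.this_module.ZulipLimiter',
#             },
#         },
#         'handlers': {
#             'errors_file': {
#                 'class': 'logging.FileHandler',
#                 'filename': '/var/log/zulip/errors.log',
#                 'filters': ['ZulipLimiter'],
#             },
#         },
#     }
#
# With ZULIPLIMITER_LIMIT = 30 in settings, repeats of the same error
# within 30 seconds would be dropped by _RateLimitFilter.filter above.
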
class ReturnTrue(logging.Filter):
    def filter(self, record):
        # type: (logging.LogRecord) -> bool
        return True

class ReturnEnabled(logging.Filter):
    def filter(self, record):
        # type: (logging.LogRecord) -> bool
        return settings.LOGGING_NOT_DISABLED

class RequireReallyDeployed(logging.Filter):
    def filter(self, record):
        # type: (logging.LogRecord) -> bool
        from django.conf import settings
        return settings.PRODUCTION

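# Sketch of how a settings-gated filter like RequireReallyDeployed
# might be attached, in the spirit of Django's built-in
# `require_debug_false` filter: gate a handler so it only fires in
# production.  The handler details here are hypothetical.
#
#     'filters': {
#         'require_really_deployed': {
#             '()': 'path.to.this_module.RequireReallyDeployed',
#         },
#     },
#     'handlers': {
#         'mail_admins': {
#             'class': 'django.utils.log.AdminEmailHandler',
#             'filters': ['require_really_deployed'],
#         },
#     },
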
def skip_200_and_304(record):
    # type: (logging.LogRecord) -> bool
    # Apparently, `status_code` is added by Django and is not an actual
    # attribute of LogRecord; as a result, mypy throws an error if we
    # access the `status_code` attribute directly.
    if getattr(record, 'status_code') in [200, 304]:
        return False

    return True

IGNORABLE_404_URLS = [
    re.compile(r'^/apple-touch-icon.*\.png$'),
    re.compile(r'^/favicon\.ico$'),
    re.compile(r'^/robots\.txt$'),
    re.compile(r'^/django_static_404.html$'),
    re.compile(r'^/wp-login.php$'),
]

def skip_boring_404s(record):
    # type: (logging.LogRecord) -> bool
    """Prevents Django's 'Not Found' warnings from being logged for common
    404 errors that don't reflect a problem in Zulip.  The overall
    result is to keep the Zulip error logs cleaner than they would
    otherwise be.

    Assumes that its input is a django.request log record.
    """
    # Apparently, `status_code` is added by Django and is not an actual
    # attribute of LogRecord; as a result, mypy throws an error if we
    # access the `status_code` attribute directly.
    if getattr(record, 'status_code') != 404:
        return True

    # We're only interested in filtering the "Not Found" errors.
    if getattr(record, 'msg') != 'Not Found: %s':
        return True

    path = getattr(record, 'args', [''])[0]
    for pattern in IGNORABLE_404_URLS:
        if re.match(pattern, path):
            return False
    return True

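# skip_200_and_304 and skip_boring_404s are plain callables rather than
# logging.Filter subclasses, so in a LOGGING dict they would typically
# be wrapped with django.utils.log.CallbackFilter.  A hedged sketch
# (the filter ids are made up, and the functions would need to be
# importable from settings):
#
#     'filters': {
#         'skip_200_and_304': {
#             '()': 'django.utils.log.CallbackFilter',
#             'callback': skip_200_and_304,
#         },
#         'skip_boring_404s': {
#             '()': 'django.utils.log.CallbackFilter',
#             'callback': skip_boring_404s,
#         },
#     },
#
# attached to whichever handlers receive django.request records.
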
def skip_site_packages_logs(record):
    # type: (logging.LogRecord) -> bool
    # This skips the log records that are generated from libraries
    # installed in site packages.
    # Workaround for https://code.djangoproject.com/ticket/26886
    if 'site-packages' in record.pathname:
        return False
    return True

def find_log_caller_module(record):
    # type: (logging.LogRecord) -> Optional[str]
    '''Find the module name corresponding to where this record was logged.'''
    # Repeat a search similar to that in logging.Logger.findCaller.
    # The logging call should still be on the stack somewhere; search until
    # we find something in the same source file, and that should give the
    # right module name.
    f = logging.currentframe()  # type: ignore  # Not in typeshed, and arguably shouldn't be
    while f is not None:
        if f.f_code.co_filename == record.pathname:
            return f.f_globals.get('__name__')
        f = f.f_back
    return None

logger_nicknames = {
    'root': '',  # This one is more like undoing a nickname.
    'zulip.requests': 'zr',  # Super common.
}

def find_log_origin(record):
    # type: (logging.LogRecord) -> str
    logger_name = logger_nicknames.get(record.name, record.name)

    if settings.LOGGING_SHOW_MODULE:
        module_name = find_log_caller_module(record)
        if module_name == logger_name or module_name == record.name:
            # Abbreviate a bit.
            return logger_name
        else:
            return '{}/{}'.format(logger_name, module_name or '?')
    else:
        return logger_name

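# Illustrative examples of the origin strings this produces, given the
# nicknames above:
#   - a record from the 'zulip.requests' logger -> 'zr'
#   - with LOGGING_SHOW_MODULE enabled and a different caller module,
#     e.g. a hypothetical 'zerver.middleware' -> 'zr/zerver.middleware'
#   - if the caller module cannot be determined -> 'zr/?'
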
log_level_abbrevs = {
    'DEBUG': 'DEBG',
    'INFO': 'INFO',
    'WARNING': 'WARN',
    'ERROR': 'ERR',
    'CRITICAL': 'CRIT',
}

def abbrev_log_levelname(levelname):
    # type: (str) -> str
    # It's unlikely someone will set a custom log level with a custom name,
    # but it's an option, so we shouldn't crash if someone does.
    return log_level_abbrevs.get(levelname, levelname[:4])

class ZulipFormatter(logging.Formatter):
    # Used by the base implementation's formatTime(); the stock default
    # separates seconds from milliseconds with `,` rather than `.`.
    default_msec_format = '%s.%03d'

    def __init__(self):
        # type: () -> None
        super().__init__(fmt=self._compute_fmt())

    def _compute_fmt(self):
        # type: () -> str
        pieces = ['%(asctime)s', '%(zulip_level_abbrev)-4s']
        if settings.LOGGING_SHOW_PID:
            pieces.append('pid:%(process)d')
        pieces.extend(['[%(zulip_origin)s]', '%(message)s'])
        return ' '.join(pieces)

    def format(self, record):
        # type: (logging.LogRecord) -> str
        if not getattr(record, 'zulip_decorated', False):
            # The `setattr` calls put this logic explicitly outside the bounds of the
            # type system; otherwise mypy would complain LogRecord lacks these attributes.
            setattr(record, 'zulip_level_abbrev', abbrev_log_levelname(record.levelname))
            setattr(record, 'zulip_origin', find_log_origin(record))
            setattr(record, 'zulip_decorated', True)
        return super().format(record)

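# Sketch of wiring this formatter into a LOGGING dict via dictConfig's
# '()' factory key (the module path and handler name are hypothetical):
#
#     'formatters': {
#         'default': {
#             '()': 'path.to.this_module.ZulipFormatter',
#         },
#     },
#     'handlers': {
#         'console': {
#             'class': 'logging.StreamHandler',
#             'formatter': 'default',
#         },
#     },
#
# With LOGGING_SHOW_PID off, a formatted line would look roughly like:
#     2017-11-08 14:30:50.333 INFO [zr] 127.0.0.1 GET 200 ...
# (the message portion here is invented for illustration).
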
def create_logger(name, log_file, log_level, log_format="%(asctime)s %(levelname)-8s %(message)s"):
    # type: (str, str, str, str) -> Logger
    """Creates a named logger for use in logging content to a certain
    file.  A few notes:

    * "name" is used in determining what gets logged to which files;
      see "loggers" in zproject/settings.py for details.  Don't use `""`
      -- that's the root logger.
    * "log_file" should be declared in zproject/settings.py in ZULIP_PATHS.
    """
    logging.basicConfig(format=log_format)
    logger = logging.getLogger(name)
    logger.setLevel(getattr(logging, log_level))

    if log_file:
        formatter = logging.Formatter(log_format)
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    return logger
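
# Example usage (the logger name and path are hypothetical; real callers
# pass paths defined via ZULIP_PATHS in zproject/settings.py):
#
#     logger = create_logger('zulip.example_worker',
#                            '/var/log/zulip/example_worker.log', 'INFO')
#     logger.info('queue drained')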