2017-03-22 22:09:38 +01:00
|
|
|
import logging
|
|
|
|
import sys
|
2020-06-11 00:54:34 +02:00
|
|
|
from functools import wraps
|
|
|
|
from types import TracebackType
|
|
|
|
from typing import Any, Callable, Dict, Iterator, NoReturn, Optional, Tuple, Type
|
|
|
|
from unittest.mock import MagicMock, patch
|
2017-03-22 22:09:38 +01:00
|
|
|
|
|
|
|
from django.conf import settings
|
|
|
|
from django.contrib.auth.models import AnonymousUser
|
2019-02-02 23:53:44 +01:00
|
|
|
from django.http import HttpRequest
|
|
|
|
from django.test import TestCase
|
logging: Clear out the Django default config on logger `django`.
By default, Django sets up two handlers on this logger, one of them
its AdminEmailHandler. We have our own handler for sending email on
error, and we want to stick to that -- we like the format somewhat
better, and crucially we've given it some rate-limiting through
ZulipLimiter.
Since we cleaned out our logging config in e0a5e6fad, though, we've
been sending error emails through both paths. The config we'd had
before that for `django` was redundant with the config on the root --
but having *a* config there was essential for causing
`logging.config.dictConfig`, when Django passes it our LOGGING dict,
to clear out that logger's previous config. So, give it an empty
config.
Django by default configures two loggers: `django` and
`django.server`. We have our own settings for `django.server`
anyway, so this is the only one we need to add.
The stdlib `logging` and `logging.config` docs aren't 100% clear, and
while the source of `logging` is admirably straightforward the source
of `logging.config` is a little twisty, so it's not easy to become
totally confident that this has the right effect just by reading.
Fortunately we can put some of that source-diving to work in writing
a test for it.
2017-10-13 02:43:44 +02:00
|
|
|
from django.utils.log import AdminEmailHandler
|
2017-03-22 22:09:38 +01:00
|
|
|
|
|
|
|
from zerver.lib.test_classes import ZulipTestCase
|
2020-06-11 00:54:34 +02:00
|
|
|
from zerver.lib.types import ViewFuncT
|
2017-11-30 23:06:21 +01:00
|
|
|
from zerver.logging_handlers import AdminNotifyHandler
|
2017-03-22 22:09:38 +01:00
|
|
|
|
python: Convert assignment type annotations to Python 3.6 style.
This commit was split by tabbott; this piece covers the vast majority
of files in Zulip, but excludes scripts/, tools/, and puppet/ to help
ensure we at least show the right error messages for Xenial systems.
We can likely further refine the remaining pieces with some testing.
Generated by com2ann, with whitespace fixes and various manual fixes
for runtime issues:
- invoiced_through: Optional[LicenseLedger] = models.ForeignKey(
+ invoiced_through: Optional["LicenseLedger"] = models.ForeignKey(
-_apns_client: Optional[APNsClient] = None
+_apns_client: Optional["APNsClient"] = None
- notifications_stream: Optional[Stream] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
- signup_notifications_stream: Optional[Stream] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
+ notifications_stream: Optional["Stream"] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
+ signup_notifications_stream: Optional["Stream"] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
- author: Optional[UserProfile] = models.ForeignKey('UserProfile', blank=True, null=True, on_delete=CASCADE)
+ author: Optional["UserProfile"] = models.ForeignKey('UserProfile', blank=True, null=True, on_delete=CASCADE)
- bot_owner: Optional[UserProfile] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
+ bot_owner: Optional["UserProfile"] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
- default_sending_stream: Optional[Stream] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
- default_events_register_stream: Optional[Stream] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
+ default_sending_stream: Optional["Stream"] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
+ default_events_register_stream: Optional["Stream"] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
-descriptors_by_handler_id: Dict[int, ClientDescriptor] = {}
+descriptors_by_handler_id: Dict[int, "ClientDescriptor"] = {}
-worker_classes: Dict[str, Type[QueueProcessingWorker]] = {}
-queues: Dict[str, Dict[str, Type[QueueProcessingWorker]]] = {}
+worker_classes: Dict[str, Type["QueueProcessingWorker"]] = {}
+queues: Dict[str, Dict[str, Type["QueueProcessingWorker"]]] = {}
-AUTH_LDAP_REVERSE_EMAIL_SEARCH: Optional[LDAPSearch] = None
+AUTH_LDAP_REVERSE_EMAIL_SEARCH: Optional["LDAPSearch"] = None
Signed-off-by: Anders Kaseorg <anders@zulipchat.com>
2020-04-22 01:09:50 +02:00
|
|
|
# Stash points written by capture_and_throw() so tests can inspect the
# request and exception info that reached the error-handling machinery.
# Both are reset in AdminNotifyHandlerTest.setUp().
captured_request: Optional[HttpRequest] = None
# Fix: the annotation previously lacked Optional despite the None initializer.
captured_exc_info: Optional[Tuple[Optional[Type[BaseException]],
                                  Optional[BaseException],
                                  Optional[TracebackType]]] = None
|
2018-05-10 19:00:29 +02:00
|
|
|
def capture_and_throw(domain: Optional[str]=None) -> Callable[[ViewFuncT], ViewFuncT]:
    """View decorator used as a rate_limit stand-in in tests: it records
    the incoming request and the raised exception's exc_info in the
    module-level `captured_request`/`captured_exc_info` stashes, then
    always fails with an exception so the error path is exercised.
    """
    def wrapper(view_func: ViewFuncT) -> ViewFuncT:
        @wraps(view_func)
        def wrapped_view(request: HttpRequest, *args: Any, **kwargs: Any) -> NoReturn:
            global captured_request, captured_exc_info
            captured_request = request
            try:
                raise Exception("Request error")
            except Exception as e:
                # Record the in-flight exception triple for later inspection.
                captured_exc_info = sys.exc_info()
                raise e
        return wrapped_view  # type: ignore[return-value] # https://github.com/python/mypy/issues/1927
    return wrapper
|
|
|
|
|
2017-11-30 23:06:21 +01:00
|
|
|
class AdminNotifyHandlerTest(ZulipTestCase):
    """Exercises AdminNotifyHandler, the logging handler that reports
    server errors to the administrators.

    Fix applied here: in the `@patch`-decorated tests, the intended
    docstring had drifted below `mock_function.return_value = None`,
    leaving it as a dead string expression; it is now a real docstring.
    """

    # Source logger for the synthetic error records built below.
    logger = logging.getLogger('django')

    def setUp(self) -> None:
        super().setUp()
        self.handler = AdminNotifyHandler()
        # Prevent the exceptions we're going to raise from being printed
        # You may want to disable this when debugging tests
        settings.LOGGING_ENABLED = False

        # Reset the capture_and_throw() stashes between tests.
        global captured_exc_info
        global captured_request
        captured_request = None
        captured_exc_info = None

    def tearDown(self) -> None:
        settings.LOGGING_ENABLED = True
        super().tearDown()

    def get_admin_zulip_handler(self) -> AdminNotifyHandler:
        # Grab the AdminNotifyHandler instance installed on the root logger.
        return [
            h for h in logging.getLogger('').handlers
            if isinstance(h, AdminNotifyHandler)
        ][0]

    @patch('zerver.logging_handlers.try_git_describe')
    def test_basic(self, mock_function: MagicMock) -> None:
        """A random exception passes happily through AdminNotifyHandler"""
        mock_function.return_value = None
        handler = self.get_admin_zulip_handler()
        try:
            raise Exception("Testing Error!")
        except Exception:
            exc_info = sys.exc_info()
        record = self.logger.makeRecord('name', logging.ERROR, 'function', 16,
                                        'message', {}, exc_info)
        handler.emit(record)

    def simulate_error(self) -> logging.LogRecord:
        """Make a real request fail, and return a LogRecord carrying the
        captured request and exc_info, as the error middleware would.
        """
        self.login('hamlet')
        with patch("zerver.decorator.rate_limit") as rate_limit_patch:
            rate_limit_patch.side_effect = capture_and_throw
            result = self.client_get("/json/users")
            self.assert_json_error(result, "Internal server error", status_code=500)
            rate_limit_patch.assert_called_once()

        record = self.logger.makeRecord('name', logging.ERROR, 'function', 15,
                                        'message', {}, captured_exc_info)
        record.request = captured_request  # type: ignore[attr-defined] # this field is dynamically added
        return record

    def run_handler(self, record: logging.LogRecord) -> Dict[str, Any]:
        """Emit `record` through our handler; return the report dict it
        passed to notify_server_error.
        """
        with patch('zerver.lib.error_notify.notify_server_error') as patched_notify:
            self.handler.emit(record)
            patched_notify.assert_called_once()
            return patched_notify.call_args[0][0]

    @patch('zerver.logging_handlers.try_git_describe')
    def test_long_exception_request(self, mock_function: MagicMock) -> None:
        """A request with no stack and multi-line report.getMessage() is handled properly"""
        mock_function.return_value = None
        record = self.simulate_error()
        record.exc_info = None
        record.msg = 'message\nmoremesssage\nmore'

        report = self.run_handler(record)
        self.assertIn("user_email", report)
        self.assertIn("message", report)
        self.assertIn("stack_trace", report)
        # With no exc_info, the handler splits a multi-line message: the
        # first line is the message, the whole text becomes the "trace".
        self.assertEqual(report['stack_trace'], 'message\nmoremesssage\nmore')
        self.assertEqual(report['message'], 'message')

    @patch('zerver.logging_handlers.try_git_describe')
    def test_request(self, mock_function: MagicMock) -> None:
        """A normal request is handled properly"""
        mock_function.return_value = None
        record = self.simulate_error()

        report = self.run_handler(record)
        self.assertIn("user_email", report)
        self.assertIn("message", report)
        self.assertIn("stack_trace", report)

        # Test that `add_request_metadata` throwing an exception is fine
        with patch("zerver.logging_handlers.traceback.print_exc"):
            with patch("zerver.logging_handlers.add_request_metadata",
                       side_effect=Exception("Unexpected exception!")):
                report = self.run_handler(record)
        self.assertNotIn("user_email", report)
        self.assertIn("message", report)
        self.assertEqual(report["stack_trace"], "See /var/log/zulip/errors.log")

        # Check anonymous user is handled correctly
        record.request.user = AnonymousUser()  # type: ignore[attr-defined] # this field is dynamically added
        report = self.run_handler(record)
        self.assertIn("host", report)
        self.assertIn("user_email", report)
        self.assertIn("message", report)
        self.assertIn("stack_trace", report)

        # Now simulate a DisallowedHost exception
        def get_host_error() -> None:
            raise Exception("Get Host Failure!")
        orig_get_host = record.request.get_host  # type: ignore[attr-defined] # this field is dynamically added
        record.request.get_host = get_host_error  # type: ignore[attr-defined] # this field is dynamically added
        report = self.run_handler(record)
        record.request.get_host = orig_get_host  # type: ignore[attr-defined] # this field is dynamically added
        self.assertIn("host", report)
        self.assertIn("user_email", report)
        self.assertIn("message", report)
        self.assertIn("stack_trace", report)

        # Test an exception_filter exception
        with patch("zerver.logging_handlers.get_exception_reporter_filter",
                   return_value=15):
            record.request.method = "POST"  # type: ignore[attr-defined] # this field is dynamically added
            report = self.run_handler(record)
            record.request.method = "GET"  # type: ignore[attr-defined] # this field is dynamically added
        self.assertIn("host", report)
        self.assertIn("user_email", report)
        self.assertIn("message", report)
        self.assertIn("stack_trace", report)

        # Test the catch-all exception handler doesn't throw
        with patch('zerver.lib.error_notify.notify_server_error',
                   side_effect=Exception("queue error")):
            self.handler.emit(record)
        with self.settings(STAGING_ERROR_NOTIFICATIONS=False):
            with patch('zerver.logging_handlers.queue_json_publish',
                       side_effect=Exception("queue error")):
                self.handler.emit(record)

        # Test no exc_info
        record.exc_info = None
        report = self.run_handler(record)
        self.assertIn("host", report)
        self.assertIn("user_email", report)
        self.assertIn("message", report)
        self.assertEqual(report["stack_trace"], 'No stack trace available')

        # Test arbitrary exceptions from request.user
        record.request.user = None  # type: ignore[attr-defined] # this field is dynamically added
        with patch("zerver.logging_handlers.traceback.print_exc"):
            report = self.run_handler(record)
        self.assertIn("host", report)
        self.assertIn("user_email", report)
        self.assertIn("message", report)
        self.assertIn("stack_trace", report)
|
logging: Clear out the Django default config on logger `django`.
By default, Django sets up two handlers on this logger, one of them
its AdminEmailHandler. We have our own handler for sending email on
error, and we want to stick to that -- we like the format somewhat
better, and crucially we've given it some rate-limiting through
ZulipLimiter.
Since we cleaned out our logging config in e0a5e6fad, though, we've
been sending error emails through both paths. The config we'd had
before that for `django` was redundant with the config on the root --
but having *a* config there was essential for causing
`logging.config.dictConfig`, when Django passes it our LOGGING dict,
to clear out that logger's previous config. So, give it an empty
config.
Django by default configures two loggers: `django` and
`django.server`. We have our own settings for `django.server`
anyway, so this is the only one we need to add.
The stdlib `logging` and `logging.config` docs aren't 100% clear, and
while the source of `logging` is admirably straightforward the source
of `logging.config` is a little twisty, so it's not easy to become
totally confident that this has the right effect just by reading.
Fortunately we can put some of that source-diving to work in writing
a test for it.
2017-10-13 02:43:44 +02:00
|
|
|
|
|
|
|
class LoggingConfigTest(TestCase):
    """Checks that our LOGGING setting fully replaces Django's default
    logging config -- in particular, that no AdminEmailHandler survives,
    since we send error email through our own handler instead.
    """

    @staticmethod
    def all_loggers() -> Iterator[logging.Logger]:
        # There is no documented API for enumerating the loggers; but the
        # internals of `logging` haven't changed in ages, so just use them.
        loggerDict = logging.Logger.manager.loggerDict  # type: ignore[attr-defined]
        for logger in loggerDict.values():
            # The manager's dict also contains PlaceHolder entries; skip them.
            if not isinstance(logger, logging.Logger):
                continue
            yield logger

    def test_django_emails_disabled(self) -> None:
        for logger in self.all_loggers():
            # The `handlers` attribute is undocumented, but see comment on
            # `all_loggers`.
            for handler in logger.handlers:
                # Use the unittest assertion rather than a bare `assert`,
                # which would be stripped when running under `python -O`.
                self.assertNotIsInstance(handler, AdminEmailHandler)
|
2018-10-19 23:53:33 +02:00
|
|
|
|
|
|
|
class ErrorFiltersTest(TestCase):
    def test_clean_data_from_query_parameters(self) -> None:
        """Every query-parameter value is scrubbed to asterisks."""
        from zerver.filters import clean_data_from_query_parameters

        cases = [
            ("api_key=abcdz&stream=1",
             "api_key=******&stream=******"),
            ("api_key=abcdz&stream=foo&topic=bar",
             "api_key=******&stream=******&topic=******"),
        ]
        for query, scrubbed in cases:
            self.assertEqual(clean_data_from_query_parameters(query), scrubbed)
|
errors: Fix infinite recursion during memcached outages.
We have a very useful piece of code, _RateLimitFilter, which is
designed to avoid sending us a billion error emails in the event that
a Zulip production server is down in a way that throws the same
exception a lot. The code uses memcached to ensure we send each
traceback roughly once per Zulip server per 10 minutes (or if
memcached is unavailable, at most 1/process/10 minutes, since we use
memcached to coordinate between processes)
However, if memcached is down, there is a logging.error call internal
to the Django/memcached setup that happens inside the cache.set() call,
and those aren't caught by the `except Exception` block around it.
This ends up resulting in infinite recursion, eventually leading to
Fatal Python error: Cannot recover from stack overflow., since this
handler is configured to run for logging.error in addition to
logging.exception.
We fix this using a thread-local variable to detect whether we are
being called recursively.
This change should prevent some nasty failure modes we've had in the
past where memcached being down resulted in infinite recursion
(resulting in extra resources being consumed by our error
notifications code, and most importantly, the error notifications not
being sent).
Fixes #12595.
2019-09-03 06:08:14 +02:00
|
|
|
|
|
|
|
class RateLimitFilterTest(ZulipTestCase):
    """Regression test for the infinite-recursion failure mode of
    _RateLimitFilter when memcached is down (#12595).
    """

    def test_recursive_filter_handling(self) -> None:
        def raising_cache_get(key: str) -> int:
            # Logging from inside cache.get re-enters
            # _RateLimitFilter.filter(); the filter must detect the
            # recursion rather than recurse until stack overflow.
            logging.error("Log an error to trigger recursive filter() calls in _RateLimitFilter.")
            raise Exception

        with patch("zerver.lib.logging_util.cache.get",
                   side_effect=raising_cache_get) as cache_get_mock:
            logging.error("Log an error to trigger initial _RateLimitFilter.filter() call.")
            # cache.get should have only been called once, by the original filter() call:
            cache_get_mock.assert_called_once()
|