import logging
import sys
from functools import wraps
from types import TracebackType
from typing import Callable, Dict, Iterator, NoReturn, Optional, Tuple, Type, Union
from unittest import mock
from unittest.mock import MagicMock, patch

from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.http import HttpRequest, HttpResponse
from django.utils.log import AdminEmailHandler
from typing_extensions import Concatenate, ParamSpec

from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import mock_queue_publish
from zerver.logging_handlers import AdminNotifyHandler, HasRequest
from zerver.models import UserProfile

ParamT = ParamSpec("ParamT")
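
# Module-level slots filled in by capture_and_throw() below; the tests use them to
# build LogRecords that look like ones produced while handling a real request.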
captured_request: Optional[HttpRequest] = None
captured_exc_info: Optional[
    Union[Tuple[Type[BaseException], BaseException, TracebackType], Tuple[None, None, None]]
] = None
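

# capture_and_throw stands in for the authenticated_json_view decorator in
# simulate_error() below: the wrapped view records the incoming request, raises a
# synthetic "Request error" exception, and stashes its exc_info in the globals above,
# so the error-handling path runs against a realistic request/exception pair.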
def capture_and_throw(
    view_func: Callable[Concatenate[HttpRequest, UserProfile, ParamT], HttpResponse]
) -> Callable[Concatenate[HttpRequest, ParamT], NoReturn]:
    @wraps(view_func)
    def wrapped_view(
        request: HttpRequest,
        /,
        *args: ParamT.args,
        **kwargs: ParamT.kwargs,
    ) -> NoReturn:
        global captured_request
        captured_request = request
        try:
            raise Exception("Request error")
        except Exception as e:
            global captured_exc_info
            captured_exc_info = sys.exc_info()
            raise e

    return wrapped_view
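

# AdminNotifyHandler is the handler Zulip installs to notify administrators of logged
# errors; the tests below exercise how it assembles its report dict (user, host,
# stack trace, ...) and hands it off for delivery.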
class AdminNotifyHandlerTest(ZulipTestCase):
    logger = logging.getLogger("django")

    def setUp(self) -> None:
        super().setUp()
        self.handler = AdminNotifyHandler()
        # Prevent the exceptions we're going to raise from being printed.
        # You may want to disable this suppression when debugging these tests.
        settings.LOGGING_ENABLED = False

        global captured_exc_info
        global captured_request
        captured_request = None
        captured_exc_info = None

    def tearDown(self) -> None:
        settings.LOGGING_ENABLED = True
        super().tearDown()

    def get_admin_zulip_handler(self) -> AdminNotifyHandler:
        return [h for h in logging.getLogger("").handlers if isinstance(h, AdminNotifyHandler)][0]

    @patch("zerver.logging_handlers.try_git_describe")
    def test_basic(self, mock_function: MagicMock) -> None:
        """A random exception passes happily through AdminNotifyHandler."""
        mock_function.return_value = None
        handler = self.get_admin_zulip_handler()
        try:
            raise Exception("Testing error!")
        except Exception:
            exc_info = sys.exc_info()
        record = self.logger.makeRecord(
            "name", logging.ERROR, "function", 16, "message", {}, exc_info
        )
        handler.emit(record)
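
    # Trigger a real 500 on /json/users (with the view decorator patched to raise),
    # then return a LogRecord carrying the captured request and exc_info, similar to
    # what the production error-logging path would hand to AdminNotifyHandler.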
    def simulate_error(self) -> logging.LogRecord:
        self.login("hamlet")
        with patch(
            "zerver.lib.rest.authenticated_json_view", side_effect=capture_and_throw
        ) as view_decorator_patch, self.assertLogs(
            "django.request", level="ERROR"
        ) as request_error_log, self.assertLogs(
            "zerver.middleware.json_error_handler", level="ERROR"
        ) as json_error_handler_log, self.settings(
            TEST_SUITE=False
        ):
            result = self.client_get("/json/users")
            self.assert_json_error(result, "Internal server error", status_code=500)
        view_decorator_patch.assert_called_once()
        self.assertEqual(
            request_error_log.output, ["ERROR:django.request:Internal Server Error: /json/users"]
        )
        self.assertTrue(
            "ERROR:zerver.middleware.json_error_handler:Traceback (most recent call last):"
            in json_error_handler_log.output[0]
        )
        self.assertTrue("Exception: Request error" in json_error_handler_log.output[0])

        record = self.logger.makeRecord(
            "name",
            logging.ERROR,
            "function",
            15,
            "message",
            {},
            captured_exc_info,
            extra={"request": captured_request},
        )
        return record
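
    # Emit the record through AdminNotifyHandler with the queue worker's
    # do_report_error patched out, and return the report dict it would have received.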
    def run_handler(self, record: logging.LogRecord) -> Dict[str, object]:
        with patch("zerver.worker.queue_processors.do_report_error") as patched_notify:
            self.handler.emit(record)
        patched_notify.assert_called_once()
        return patched_notify.call_args[0][0]

    @patch("zerver.logging_handlers.try_git_describe")
    def test_long_exception_request(self, mock_function: MagicMock) -> None:
        """A request with no stack and a multi-line record.getMessage() is handled properly."""
        mock_function.return_value = None
        record = self.simulate_error()
        record.exc_info = None
        record.msg = "message\nmoremessage\nmore"

        report = self.run_handler(record)
        self.assertIn("user", report)
        assert isinstance(report["user"], dict)
        self.assertIn("user_email", report["user"])
        self.assertIn("user_role", report["user"])
        self.assertIn("message", report)
        self.assertIn("stack_trace", report)
        self.assertEqual(report["stack_trace"], "message\nmoremessage\nmore")
        self.assertEqual(report["message"], "message")
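
    # test_request walks AdminNotifyHandler through its main report-building paths:
    # add_request_metadata failures, anonymous users, DisallowedHost, exception_filter
    # lookups, queue publication failures, missing exc_info, and a missing request.user.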
    @patch("zerver.logging_handlers.try_git_describe")
    def test_request(self, mock_function: MagicMock) -> None:
        """A normal request is handled properly."""
        mock_function.return_value = None
        record = self.simulate_error()
        assert isinstance(record, HasRequest)

        report = self.run_handler(record)
        self.assertIn("user", report)
        assert isinstance(report["user"], dict)
        self.assertIn("user_email", report["user"])
        self.assertIn("user_role", report["user"])
        self.assertIn("message", report)
        self.assertIn("stack_trace", report)

        # Test that `add_request_metadata` throwing an exception is fine
        with patch("zerver.logging_handlers.traceback.print_exc"):
            with patch(
                "zerver.logging_handlers.add_request_metadata",
                side_effect=Exception("Unexpected exception!"),
            ):
                report = self.run_handler(record)
        self.assertNotIn("user", report)
        self.assertIn("message", report)
        self.assertEqual(report["stack_trace"], "See /var/log/zulip/errors.log")

        # Check anonymous user is handled correctly
        record.request.user = AnonymousUser()
        report = self.run_handler(record)
        self.assertIn("host", report)
        self.assertIn("user", report)
        assert isinstance(report["user"], dict)
        self.assertIn("user_email", report["user"])
        self.assertIn("user_role", report["user"])
        self.assertIn("message", report)
        self.assertIn("stack_trace", report)

        # Put it back so we continue to test the non-anonymous case
        record.request.user = self.example_user("hamlet")

        # Now simulate a DisallowedHost exception
        with mock.patch.object(
            record.request, "get_host", side_effect=Exception("Get host failure!")
        ) as m:
            report = self.run_handler(record)
            self.assertIn("host", report)
            self.assertIn("user", report)
            assert isinstance(report["user"], dict)
            self.assertIn("user_email", report["user"])
            self.assertIn("user_role", report["user"])
            self.assertIn("message", report)
            self.assertIn("stack_trace", report)
            m.assert_called_once()

        # Test an exception_filter exception
        with patch("zerver.logging_handlers.get_exception_reporter_filter", return_value=15):
            record.request.method = "POST"
            report = self.run_handler(record)
            record.request.method = "GET"
            self.assertIn("host", report)
            self.assertIn("user", report)
            assert isinstance(report["user"], dict)
            self.assertIn("user_email", report["user"])
            self.assertIn("user_role", report["user"])
            self.assertIn("message", report)
            self.assertIn("stack_trace", report)

        # Test the catch-all exception handler doesn't throw
        with patch(
            "zerver.worker.queue_processors.do_report_error", side_effect=Exception("queue error")
        ):
            self.handler.emit(record)
        with mock_queue_publish(
            "zerver.logging_handlers.queue_json_publish", side_effect=Exception("queue error")
        ) as m:
            with patch("logging.warning") as log_mock:
                self.handler.emit(record)
                m.assert_called_once()
                log_mock.assert_called_once_with(
                    "Reporting an exception triggered an exception!", exc_info=True
                )
        with mock_queue_publish("zerver.logging_handlers.queue_json_publish") as m:
            with patch("logging.warning") as log_mock:
                self.handler.emit(record)
                m.assert_called_once()
                log_mock.assert_not_called()

        # Test no exc_info
        record.exc_info = None
        report = self.run_handler(record)
        self.assertIn("host", report)
        self.assertIn("user", report)
        assert isinstance(report["user"], dict)
        self.assertIn("user_email", report["user"])
        self.assertIn("user_role", report["user"])
        self.assertIn("message", report)
        self.assertEqual(report["stack_trace"], "No stack trace available")

        # Test arbitrary exceptions from request.user
        del record.request.user
        with patch("zerver.logging_handlers.traceback.print_exc"):
            report = self.run_handler(record)
        self.assertIn("host", report)
        self.assertIn("user", report)
        assert isinstance(report["user"], dict)
        self.assertIn("user_email", report["user"])
        self.assertIn("user_role", report["user"])
        self.assertIn("message", report)
        self.assertIn("stack_trace", report)
        self.assertEqual(report["user"]["user_email"], None)
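

# Zulip's LOGGING configuration gives the `django` logger an explicit (empty) entry so
# that logging.config.dictConfig clears out Django's default handlers on it -- in
# particular AdminEmailHandler, since error emails go through AdminNotifyHandler (with
# rate limiting) instead.  This test verifies no AdminEmailHandler is left attached
# to any logger.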
class LoggingConfigTest(ZulipTestCase):
    @staticmethod
    def all_loggers() -> Iterator[logging.Logger]:
        # There is no documented API for enumerating the loggers; but the
        # internals of `logging` haven't changed in ages, so just use them.
        for logger in logging.Logger.manager.loggerDict.values():
            if not isinstance(logger, logging.Logger):
                continue
            yield logger

    def test_django_emails_disabled(self) -> None:
        for logger in self.all_loggers():
            # The `handlers` attribute is undocumented, but see comment on
            # `all_loggers`.
            for handler in logger.handlers:
                assert not isinstance(handler, AdminEmailHandler)
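

# zerver.filters.clean_data_from_query_parameters masks query-string values (such as
# api_key) before request data is included in error reports.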
class ErrorFiltersTest(ZulipTestCase):
    def test_clean_data_from_query_parameters(self) -> None:
        from zerver.filters import clean_data_from_query_parameters

        self.assertEqual(
            clean_data_from_query_parameters("api_key=abcdz&stream=1"),
            "api_key=******&stream=******",
        )
        self.assertEqual(
            clean_data_from_query_parameters("api_key=abcdz&stream=foo&topic=bar"),
            "api_key=******&stream=******&topic=******",
        )
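

# _RateLimitFilter rate-limits error reporting (roughly one report per traceback per
# 10 minutes), using memcached to coordinate between processes.  If memcached is down,
# the cache call inside filter() can itself log an error and re-enter the filter; a
# thread-local guard keeps that from turning into infinite recursion.  This test
# exercises that guard.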
class RateLimitFilterTest(ZulipTestCase):
    # This logger has special settings configured in
    # test_extra_settings.py.
    logger = logging.getLogger("zulip.test_zulip_admins_handler")

    def test_recursive_filter_handling(self) -> None:
        def mocked_cache_get(key: str) -> int:
            self.logger.error(
                "Log an error to trigger recursive filter() calls in _RateLimitFilter."
            )
            raise Exception

        with patch("zerver.lib.logging_util.cache.get", side_effect=mocked_cache_get) as m:
            self.logger.error("Log an error to trigger initial _RateLimitFilter.filter() call.")
        # cache.get should have only been called once, by the original filter() call:
        m.assert_called_once()