mirror of https://github.com/zulip/zulip.git
logging: Report stack_info on logging.exception calls.
The exception trace only goes from where the exception was thrown up to where the `logging.exception` call is; any context as to where _that_ was called from is lost, unless `stack_info` is passed as well. Having the stack is particularly useful for Sentry exceptions, which gain the full stack trace. Add `stack_info=True` on all `logging.exception` calls with a non-trivial stack; we omit `wsgi.py`. Adjusts tests to match.
This commit is contained in:
parent
e3a093b73f
commit
2928bbc8bd
|
@@ -140,7 +140,7 @@ def upgrade(request: HttpRequest, user: UserProfile,
|
|||
)
|
||||
return json_error(e.message, data={'error_description': e.description})
|
||||
except Exception:
|
||||
billing_logger.exception("Uncaught exception in billing:")
|
||||
billing_logger.exception("Uncaught exception in billing:", stack_info=True)
|
||||
error_message = BillingError.CONTACT_SUPPORT
|
||||
error_description = "uncaught exception during upgrade"
|
||||
return json_error(error_message, data={'error_description': error_description})
|
||||
|
|
|
@@ -601,7 +601,7 @@ def run_parallel_wrapper(f: Callable[[ListJobData], None], full_items: List[List
|
|||
try:
|
||||
f(item)
|
||||
except Exception:
|
||||
logging.exception("Error processing item: %s", item)
|
||||
logging.exception("Error processing item: %s", item, stack_info=True)
|
||||
count += 1
|
||||
if count % 1000 == 0:
|
||||
logging.info("A download thread finished %s items", count)
|
||||
|
|
|
@@ -323,9 +323,9 @@ body:
|
|||
message = message.strip(' ')
|
||||
|
||||
if unexpected_event:
|
||||
webhook_unexpected_events_logger.exception(message)
|
||||
webhook_unexpected_events_logger.exception(message, stack_info=True)
|
||||
else:
|
||||
webhook_logger.exception(message)
|
||||
webhook_logger.exception(message, stack_info=True)
|
||||
|
||||
def full_webhook_client_name(raw_client_name: Optional[str]=None) -> Optional[str]:
|
||||
if raw_client_name is None:
|
||||
|
|
|
@@ -2417,7 +2417,7 @@ def _internal_prep_message(realm: Realm,
|
|||
return check_message(sender, get_client("Internal"), addressee,
|
||||
content, realm=realm)
|
||||
except JsonableError as e:
|
||||
logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg)
|
||||
logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg, stack_info=True)
|
||||
|
||||
return None
|
||||
|
||||
|
|
|
@@ -429,7 +429,7 @@ def fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]:
|
|||
# but for now it seems reasonable to log at error
|
||||
# level (so that we get notified), but then cache the
|
||||
# failure to proceed with our usual work
|
||||
markdown_logger.exception("Unknown error fetching tweet data")
|
||||
markdown_logger.exception("Unknown error fetching tweet data", stack_info=True)
|
||||
return None
|
||||
return res
|
||||
|
||||
|
|
|
@@ -333,6 +333,6 @@ def do_rest_call(base_url: str,
|
|||
f"An exception of type *{type(e).__name__}* occurred for message `{event['command']}`! "
|
||||
"See the Zulip server logs for more information."
|
||||
)
|
||||
logging.exception("Outhook trigger failed:")
|
||||
logging.exception("Outhook trigger failed:", stack_info=True)
|
||||
fail_with_message(event, response_message)
|
||||
notify_bot_owner(event, exception=e)
|
||||
|
|
|
@@ -34,7 +34,7 @@ if settings.USING_RABBITMQ:
|
|||
|
||||
|
||||
def handle_callback_exception(callback: Callable[..., Any]) -> None:
|
||||
logging.exception("Exception in callback")
|
||||
logging.exception("Exception in callback", stack_info=True)
|
||||
app_log.error("Exception in callback %r", callback, exc_info=True)
|
||||
|
||||
class Command(BaseCommand):
|
||||
|
|
|
@@ -348,7 +348,7 @@ body:
|
|||
content_type=request.content_type,
|
||||
custom_headers="HTTP_X_CUSTOM_HEADER: custom_value\n",
|
||||
body=request.body,
|
||||
))
|
||||
), stack_info=True)
|
||||
|
||||
# Test when an unexpected webhook event occurs
|
||||
with mock.patch('zerver.decorator.webhook_unexpected_events_logger.exception') as mock_exception:
|
||||
|
@@ -379,7 +379,7 @@ body:
|
|||
content_type=request.content_type,
|
||||
custom_headers="HTTP_X_CUSTOM_HEADER: custom_value\n",
|
||||
body=request.body,
|
||||
))
|
||||
), stack_info=True)
|
||||
|
||||
with self.settings(RATE_LIMITING=True):
|
||||
with mock.patch('zerver.decorator.rate_limit_user') as rate_limit_mock:
|
||||
|
@@ -517,7 +517,7 @@ body:
|
|||
content_type=request.content_type,
|
||||
custom_headers=None,
|
||||
body=request.body,
|
||||
))
|
||||
), stack_info=True)
|
||||
|
||||
def test_authenticated_rest_api_view_logging_unexpected_event(self) -> None:
|
||||
@authenticated_rest_api_view(webhook_client_name="ClientName")
|
||||
|
@@ -561,7 +561,7 @@ body:
|
|||
content_type=request.content_type,
|
||||
custom_headers=None,
|
||||
body=request.body,
|
||||
))
|
||||
), stack_info=True)
|
||||
|
||||
def test_authenticated_rest_api_view_with_non_webhook_view(self) -> None:
|
||||
@authenticated_rest_api_view()
|
||||
|
|
|
@@ -1620,6 +1620,7 @@ class InternalPrepTest(ZulipTestCase):
|
|||
"Error queueing internal message by %s: %s",
|
||||
"cordelia@zulip.com",
|
||||
"Message must not be empty",
|
||||
stack_info=True,
|
||||
)
|
||||
|
||||
with mock.patch('logging.exception') as m:
|
||||
|
@@ -1634,6 +1635,7 @@ class InternalPrepTest(ZulipTestCase):
|
|||
"Error queueing internal message by %s: %s",
|
||||
"cordelia@zulip.com",
|
||||
"Message must not be empty",
|
||||
stack_info=True,
|
||||
)
|
||||
|
||||
with mock.patch('logging.exception') as m:
|
||||
|
@@ -1649,6 +1651,7 @@ class InternalPrepTest(ZulipTestCase):
|
|||
"Error queueing internal message by %s: %s",
|
||||
"cordelia@zulip.com",
|
||||
"Message must not be empty",
|
||||
stack_info=True,
|
||||
)
|
||||
|
||||
with mock.patch('logging.exception') as m:
|
||||
|
@@ -1664,6 +1667,7 @@ class InternalPrepTest(ZulipTestCase):
|
|||
"Error queueing internal message by %s: %s",
|
||||
"cordelia@zulip.com",
|
||||
"Message must not be empty",
|
||||
stack_info=True,
|
||||
)
|
||||
|
||||
def test_error_handling(self) -> None:
|
||||
|
@@ -1694,6 +1698,7 @@ class InternalPrepTest(ZulipTestCase):
|
|||
"Error queueing internal message by %s: %s",
|
||||
"cordelia@zulip.com",
|
||||
"You can't send private messages outside of your organization.",
|
||||
stack_info=True,
|
||||
)
|
||||
|
||||
def test_ensure_stream_gets_called(self) -> None:
|
||||
|
|
|
@@ -570,6 +570,7 @@ class WorkerTest(ZulipTestCase):
|
|||
worker.start()
|
||||
logging_exception_mock.assert_called_once_with(
|
||||
"Problem handling data on queue %s", "unreliable_worker",
|
||||
stack_info=True,
|
||||
)
|
||||
|
||||
self.assertEqual(processed, ['good', 'fine', 'back to normal'])
|
||||
|
@@ -609,6 +610,7 @@ class WorkerTest(ZulipTestCase):
|
|||
pass
|
||||
logging_exception_mock.assert_called_once_with(
|
||||
"Problem handling data on queue %s", "unreliable_loopworker",
|
||||
stack_info=True,
|
||||
)
|
||||
|
||||
self.assertEqual(processed, ['good', 'fine'])
|
||||
|
|
|
@@ -56,7 +56,7 @@ class ZephyrTest(ZulipTestCase):
|
|||
result = post("zephyr", cred=cred)
|
||||
|
||||
self.assert_json_error(result, 'We were unable to setup mirroring for you')
|
||||
log.assert_called_with("Error updating the user's ccache")
|
||||
log.assert_called_with("Error updating the user's ccache", stack_info=True)
|
||||
|
||||
with ccache_mock(return_value=b'1234'), mirror_mock(), ssh_mock() as ssh:
|
||||
result = post("zephyr", cred=cred)
|
||||
|
|
|
@@ -179,7 +179,7 @@ class ClientDescriptor:
|
|||
finish_handler(self.current_handler_id, self.event_queue.id,
|
||||
self.event_queue.contents(), self.apply_markdown)
|
||||
except Exception:
|
||||
logging.exception(err_msg)
|
||||
logging.exception(err_msg, stack_info=True)
|
||||
finally:
|
||||
self.disconnect_handler()
|
||||
return True
|
||||
|
@@ -482,14 +482,14 @@ def load_event_queues(port: int) -> None:
|
|||
except FileNotFoundError:
|
||||
pass
|
||||
except ValueError:
|
||||
logging.exception("Tornado %d could not deserialize event queues", port)
|
||||
logging.exception("Tornado %d could not deserialize event queues", port, stack_info=True)
|
||||
else:
|
||||
try:
|
||||
clients = {
|
||||
qid: ClientDescriptor.from_dict(client) for (qid, client) in data
|
||||
}
|
||||
except Exception:
|
||||
logging.exception("Tornado %d could not deserialize event queues", port)
|
||||
logging.exception("Tornado %d could not deserialize event queues", port, stack_info=True)
|
||||
|
||||
for client in clients.values():
|
||||
# Put code for migrations due to event queue data format changes here
|
||||
|
|
|
@@ -58,12 +58,12 @@ def finish_handler(handler_id: int, event_queue_id: str,
|
|||
request, apply_markdown=apply_markdown)
|
||||
except OSError as e:
|
||||
if str(e) != 'Stream is closed':
|
||||
logging.exception(err_msg)
|
||||
logging.exception(err_msg, stack_info=True)
|
||||
except AssertionError as e:
|
||||
if str(e) != 'Request closed':
|
||||
logging.exception(err_msg)
|
||||
logging.exception(err_msg, stack_info=True)
|
||||
except Exception:
|
||||
logging.exception(err_msg)
|
||||
logging.exception(err_msg, stack_info=True)
|
||||
|
||||
|
||||
class AsyncDjangoHandler(tornado.web.RequestHandler, base.BaseHandler):
|
||||
|
|
|
@@ -63,7 +63,7 @@ def webathena_kerberos_login(request: HttpRequest, user_profile: UserProfile,
|
|||
api_key,
|
||||
base64.b64encode(ccache).decode("utf-8")])
|
||||
except Exception:
|
||||
logging.exception("Error updating the user's ccache")
|
||||
logging.exception("Error updating the user's ccache", stack_info=True)
|
||||
return json_error(_("We were unable to setup mirroring for you"))
|
||||
|
||||
return json_success()
|
||||
|
|
|
@@ -154,7 +154,7 @@ def retry_send_email_failures(
|
|||
error_class_name = e.__class__.__name__
|
||||
|
||||
def on_failure(event: Dict[str, Any]) -> None:
|
||||
logging.exception("Event %r failed due to exception %s", event, error_class_name)
|
||||
logging.exception("Event %r failed due to exception %s", event, error_class_name, stack_info=True)
|
||||
|
||||
retry_event(worker.queue_name, data, on_failure)
|
||||
|
||||
|
@@ -262,7 +262,7 @@ class QueueProcessingWorker(ABC):
|
|||
check_and_send_restart_signal()
|
||||
|
||||
def _log_problem(self) -> None:
|
||||
logging.exception("Problem handling data on queue %s", self.queue_name)
|
||||
logging.exception("Problem handling data on queue %s", self.queue_name, stack_info=True)
|
||||
|
||||
def setup(self) -> None:
|
||||
self.q = SimpleQueueClient()
|
||||
|
|
Loading…
Reference in New Issue