logging: Report stack_info on logging.exception calls.

The exception traceback only runs from where the exception was raised
up to where the `logging.exception` call is; any context as to where
_that_ was called from is lost unless `stack_info` is passed as well.
Having the stack is particularly useful for exceptions reported to
Sentry, which then gain the full stack trace.

Add `stack_info=True` to all `logging.exception` calls with a
non-trivial stack; we omit `wsgi.py`.  Adjust tests to match.
Alex Vandiver, 2020-08-10 18:19:00 -07:00; committed by Tim Abbott
parent e3a093b73f
commit 2928bbc8bd
15 changed files with 29 additions and 22 deletions
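
For reference, a minimal sketch of the difference (plain standard-library `logging`, not code from this commit): `logging.exception` implicitly records `exc_info`, the traceback from the raise site up to the `except` block, while `stack_info=True` additionally records how the logging call itself was reached.

import logging

logging.basicConfig(level=logging.ERROR)

def handler() -> None:
    try:
        1 / 0
    except Exception:
        # The implicit exc_info covers only the frames the exception
        # passed through (here: just handler()); stack_info=True appends
        # a "Stack (most recent call last):" section showing how
        # handler() itself was reached.
        logging.exception("Uncaught exception:", stack_info=True)

def caller() -> None:
    # This frame shows up only in the stack_info output, not in the
    # exception traceback.
    handler()

caller()

That appended "Stack (most recent call last):" section is what a logger-based Sentry integration can use to give the reported event its full stack.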

@@ -140,7 +140,7 @@ def upgrade(request: HttpRequest, user: UserProfile,
         )
         return json_error(e.message, data={'error_description': e.description})
     except Exception:
-        billing_logger.exception("Uncaught exception in billing:")
+        billing_logger.exception("Uncaught exception in billing:", stack_info=True)
         error_message = BillingError.CONTACT_SUPPORT
         error_description = "uncaught exception during upgrade"
         return json_error(error_message, data={'error_description': error_description})

@@ -601,7 +601,7 @@ def run_parallel_wrapper(f: Callable[[ListJobData], None], full_items: List[List
         try:
             f(item)
         except Exception:
-            logging.exception("Error processing item: %s", item)
+            logging.exception("Error processing item: %s", item, stack_info=True)
         count += 1
         if count % 1000 == 0:
             logging.info("A download thread finished %s items", count)

@@ -323,9 +323,9 @@ body:
     message = message.strip(' ')
     if unexpected_event:
-        webhook_unexpected_events_logger.exception(message)
+        webhook_unexpected_events_logger.exception(message, stack_info=True)
     else:
-        webhook_logger.exception(message)
+        webhook_logger.exception(message, stack_info=True)

 def full_webhook_client_name(raw_client_name: Optional[str]=None) -> Optional[str]:
     if raw_client_name is None:

@@ -2417,7 +2417,7 @@ def _internal_prep_message(realm: Realm,
         return check_message(sender, get_client("Internal"), addressee,
                              content, realm=realm)
     except JsonableError as e:
-        logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg)
+        logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg, stack_info=True)

     return None

@@ -429,7 +429,7 @@ def fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]:
             # but for now it seems reasonable to log at error
             # level (so that we get notified), but then cache the
             # failure to proceed with our usual work
-            markdown_logger.exception("Unknown error fetching tweet data")
+            markdown_logger.exception("Unknown error fetching tweet data", stack_info=True)
             return None
     return res

@@ -333,6 +333,6 @@ def do_rest_call(base_url: str,
             f"An exception of type *{type(e).__name__}* occurred for message `{event['command']}`! "
             "See the Zulip server logs for more information."
         )
-        logging.exception("Outhook trigger failed:")
+        logging.exception("Outhook trigger failed:", stack_info=True)
         fail_with_message(event, response_message)
         notify_bot_owner(event, exception=e)

@@ -34,7 +34,7 @@ if settings.USING_RABBITMQ:

 def handle_callback_exception(callback: Callable[..., Any]) -> None:
-    logging.exception("Exception in callback")
+    logging.exception("Exception in callback", stack_info=True)
     app_log.error("Exception in callback %r", callback, exc_info=True)

 class Command(BaseCommand):

@@ -348,7 +348,7 @@ body:
                 content_type=request.content_type,
                 custom_headers="HTTP_X_CUSTOM_HEADER: custom_value\n",
                 body=request.body,
-            ))
+            ), stack_info=True)

         # Test when an unexpected webhook event occurs
         with mock.patch('zerver.decorator.webhook_unexpected_events_logger.exception') as mock_exception:
@@ -379,7 +379,7 @@ body:
                 content_type=request.content_type,
                 custom_headers="HTTP_X_CUSTOM_HEADER: custom_value\n",
                 body=request.body,
-            ))
+            ), stack_info=True)

         with self.settings(RATE_LIMITING=True):
             with mock.patch('zerver.decorator.rate_limit_user') as rate_limit_mock:
@@ -517,7 +517,7 @@ body:
                 content_type=request.content_type,
                 custom_headers=None,
                 body=request.body,
-            ))
+            ), stack_info=True)

     def test_authenticated_rest_api_view_logging_unexpected_event(self) -> None:
         @authenticated_rest_api_view(webhook_client_name="ClientName")
@@ -561,7 +561,7 @@ body:
                 content_type=request.content_type,
                 custom_headers=None,
                 body=request.body,
-            ))
+            ), stack_info=True)

     def test_authenticated_rest_api_view_with_non_webhook_view(self) -> None:
         @authenticated_rest_api_view()

@@ -1620,6 +1620,7 @@ class InternalPrepTest(ZulipTestCase):
                 "Error queueing internal message by %s: %s",
                 "cordelia@zulip.com",
                 "Message must not be empty",
+                stack_info=True,
             )

         with mock.patch('logging.exception') as m:
@@ -1634,6 +1635,7 @@ class InternalPrepTest(ZulipTestCase):
                 "Error queueing internal message by %s: %s",
                 "cordelia@zulip.com",
                 "Message must not be empty",
+                stack_info=True,
             )

         with mock.patch('logging.exception') as m:
@@ -1649,6 +1651,7 @@ class InternalPrepTest(ZulipTestCase):
                 "Error queueing internal message by %s: %s",
                 "cordelia@zulip.com",
                 "Message must not be empty",
+                stack_info=True,
             )

         with mock.patch('logging.exception') as m:
@@ -1664,6 +1667,7 @@ class InternalPrepTest(ZulipTestCase):
                 "Error queueing internal message by %s: %s",
                 "cordelia@zulip.com",
                 "Message must not be empty",
+                stack_info=True,
             )

     def test_error_handling(self) -> None:
@@ -1694,6 +1698,7 @@ class InternalPrepTest(ZulipTestCase):
                 "Error queueing internal message by %s: %s",
                 "cordelia@zulip.com",
                 "You can't send private messages outside of your organization.",
+                stack_info=True,
             )

     def test_ensure_stream_gets_called(self) -> None:

@@ -570,6 +570,7 @@ class WorkerTest(ZulipTestCase):
                 worker.start()
             logging_exception_mock.assert_called_once_with(
                 "Problem handling data on queue %s", "unreliable_worker",
+                stack_info=True,
             )

         self.assertEqual(processed, ['good', 'fine', 'back to normal'])
@@ -609,6 +610,7 @@ class WorkerTest(ZulipTestCase):
                     pass
             logging_exception_mock.assert_called_once_with(
                 "Problem handling data on queue %s", "unreliable_loopworker",
+                stack_info=True,
             )

         self.assertEqual(processed, ['good', 'fine'])

@@ -56,7 +56,7 @@ class ZephyrTest(ZulipTestCase):
             result = post("zephyr", cred=cred)

         self.assert_json_error(result, 'We were unable to setup mirroring for you')
-        log.assert_called_with("Error updating the user's ccache")
+        log.assert_called_with("Error updating the user's ccache", stack_info=True)

         with ccache_mock(return_value=b'1234'), mirror_mock(), ssh_mock() as ssh:
             result = post("zephyr", cred=cred)

@@ -179,7 +179,7 @@ class ClientDescriptor:
                 finish_handler(self.current_handler_id, self.event_queue.id,
                                self.event_queue.contents(), self.apply_markdown)
             except Exception:
-                logging.exception(err_msg)
+                logging.exception(err_msg, stack_info=True)
             finally:
                 self.disconnect_handler()
             return True
@@ -482,14 +482,14 @@ def load_event_queues(port: int) -> None:
     except FileNotFoundError:
         pass
     except ValueError:
-        logging.exception("Tornado %d could not deserialize event queues", port)
+        logging.exception("Tornado %d could not deserialize event queues", port, stack_info=True)
     else:
         try:
             clients = {
                 qid: ClientDescriptor.from_dict(client) for (qid, client) in data
             }
         except Exception:
-            logging.exception("Tornado %d could not deserialize event queues", port)
+            logging.exception("Tornado %d could not deserialize event queues", port, stack_info=True)

     for client in clients.values():
         # Put code for migrations due to event queue data format changes here

@@ -58,12 +58,12 @@ def finish_handler(handler_id: int, event_queue_id: str,
                              request, apply_markdown=apply_markdown)
     except OSError as e:
         if str(e) != 'Stream is closed':
-            logging.exception(err_msg)
+            logging.exception(err_msg, stack_info=True)
     except AssertionError as e:
         if str(e) != 'Request closed':
-            logging.exception(err_msg)
+            logging.exception(err_msg, stack_info=True)
     except Exception:
-        logging.exception(err_msg)
+        logging.exception(err_msg, stack_info=True)

 class AsyncDjangoHandler(tornado.web.RequestHandler, base.BaseHandler):

@@ -63,7 +63,7 @@ def webathena_kerberos_login(request: HttpRequest, user_profile: UserProfile,
                                api_key,
                                base64.b64encode(ccache).decode("utf-8")])
     except Exception:
-        logging.exception("Error updating the user's ccache")
+        logging.exception("Error updating the user's ccache", stack_info=True)
         return json_error(_("We were unable to setup mirroring for you"))
     return json_success()

@@ -154,7 +154,7 @@ def retry_send_email_failures(
         error_class_name = e.__class__.__name__

         def on_failure(event: Dict[str, Any]) -> None:
-            logging.exception("Event %r failed due to exception %s", event, error_class_name)
+            logging.exception("Event %r failed due to exception %s", event, error_class_name, stack_info=True)

         retry_event(worker.queue_name, data, on_failure)
@@ -262,7 +262,7 @@ class QueueProcessingWorker(ABC):
         check_and_send_restart_signal()

     def _log_problem(self) -> None:
-        logging.exception("Problem handling data on queue %s", self.queue_name)
+        logging.exception("Problem handling data on queue %s", self.queue_name, stack_info=True)

     def setup(self) -> None:
         self.q = SimpleQueueClient()
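
Because the tests patch the logger methods with mocks, each mock assertion must spell out the new keyword argument as well, as in the test hunks above. A minimal sketch of the pattern (the queue name "my_queue" is hypothetical, not from the diff):

import logging
from unittest import mock

with mock.patch("logging.exception") as logging_exception_mock:
    # Stand-in for the code under test, which now passes stack_info=True.
    logging.exception("Problem handling data on queue %s", "my_queue", stack_info=True)

# Without stack_info=True in the expected call, assert_called_once_with
# would raise AssertionError -- which is why the tests were adjusted.
logging_exception_mock.assert_called_once_with(
    "Problem handling data on queue %s", "my_queue", stack_info=True,
)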