logging: Use logging.exception and exc_info for unexpected exceptions.

logging.exception() and logging.debug(exc_info=True),
etc. automatically include a traceback.

Signed-off-by: Anders Kaseorg <anders@zulip.com>
This commit is contained in:
Anders Kaseorg 2020-06-11 16:35:37 -07:00 committed by Tim Abbott
parent 4b6d2cf25f
commit 1ed2d9b4a0
20 changed files with 71 additions and 58 deletions

View File

@@ -128,8 +128,8 @@ def upgrade(request: HttpRequest, user: UserProfile,
schedule, license_management, licenses, stripe_token is not None,
)
return json_error(e.message, data={'error_description': e.description})
except Exception as e:
billing_logger.exception(f"Uncaught exception in billing: {e}")
except Exception:
billing_logger.exception("Uncaught exception in billing:")
error_message = BillingError.CONTACT_SUPPORT
error_description = "uncaught exception during upgrade"
return json_error(error_message, data={'error_description': error_description})

View File

@@ -163,7 +163,7 @@ while True:
retries -= 1
if retries <= 0:
raise
logger.info(e)
logger.info(e.pgerror, exc_info=True)
logger.info("Sleeping and reconnecting")
time.sleep(5)
if conn is not None:

View File

@@ -49,11 +49,8 @@ def fetch_contributors(repo_link: str) -> Optional[List[Dict[str, Dict[str, Any]
def write_to_disk(json_data: ContributorsJSON, out_file: str) -> None:
with open(out_file, 'w') as f:
try:
f.write(f"{json.dumps(json_data, indent=2, sort_keys=True)}\n")
except OSError as e:
logger.warning(e)
sys.exit(1)
json.dump(json_data, f, indent=2, sort_keys=True)
f.write("\n")
def update_contributor_data_file() -> None:
"""

View File

@@ -59,9 +59,11 @@ rules:
- pattern-either:
- pattern: logging.$Y(... % ...)
- pattern: logging.$Y(... .format(...))
- pattern: logging.$Y(f"...")
- pattern: logger.$Y(... % ...)
- pattern: logger.$Y(... .format(...))
- pattern-where-python: "vars['$Y'] in ['debug', 'info', 'warning', 'error', 'critical']"
- pattern: logger.$Y(f"...")
- pattern-where-python: "vars['$Y'] in ['debug', 'info', 'warning', 'error', 'critical', 'exception']"
severity: ERROR
message: "Pass format arguments to logging (https://docs.python.org/3/howto/logging.html#optimization)"

View File

@@ -2,7 +2,6 @@ import logging
import os
import random
import shutil
import traceback
from typing import AbstractSet, Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar
import requests
@@ -586,8 +585,7 @@ def run_parallel_wrapper(f: Callable[[ListJobData], None], full_items: List[List
try:
f(item)
except Exception:
logging.info("Error processing item: %s", item)
traceback.print_exc()
logging.exception("Error processing item: %s", item)
count += 1
if count % 1000 == 0:
logging.info("A download thread finished %s items", count)

View File

@@ -2392,7 +2392,7 @@ def _internal_prep_message(realm: Realm,
return check_message(sender, get_client("Internal"), addressee,
content, realm=realm)
except JsonableError as e:
logging.exception(f"Error queueing internal message by {sender.delivery_email}: {e}")
logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg)
return None

View File

@@ -6,7 +6,6 @@ import logging
import os
import re
import time
import traceback
import urllib
import urllib.parse
from collections import defaultdict, deque
@@ -424,7 +423,7 @@ def fetch_tweet_data(tweet_id: str) -> Optional[Dict[str, Any]]:
# but for now it seems reasonable to log at error
# level (so that we get notified), but then cache the
# failure to proceed with our usual work
bugdown_logger.error(traceback.format_exc())
bugdown_logger.exception("Unknown error fetching tweet data")
return None
return res
@@ -984,7 +983,7 @@ class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
# We put this in its own try-except because it requires external
# connectivity. If Twitter flakes out, we don't want to not-render
# the entire message; we just want to not show the Twitter preview.
bugdown_logger.warning(traceback.format_exc())
bugdown_logger.warning("Error building Twitter link", exc_info=True)
return None
def get_url_data(self, e: Element) -> Optional[Tuple[str, Optional[str]]]:
@@ -2367,9 +2366,11 @@ def do_convert(content: str,
# NOTE: Don't change this message without also changing the
# logic in logging_handlers.py or we can create recursive
# exceptions.
exception_message = ('Exception in Markdown parser: %sInput (sanitized) was: %s\n (message %s)'
% (traceback.format_exc(), cleaned, logging_message_id))
bugdown_logger.exception(exception_message)
bugdown_logger.exception(
'Exception in Markdown parser; input (sanitized) was: %s\n (message %s)',
cleaned,
logging_message_id,
)
raise BugdownRenderingException()
finally:

View File

@@ -408,12 +408,11 @@ def process_message(message: EmailMessage, rcpt_to: Optional[str]=None) -> None:
process_missed_message(to, message)
else:
process_stream_message(to, message)
except ZulipEmailForwardUserError as e:
# TODO: notify sender of error, retry if appropriate.
logging.warning(e.args[0])
except ZulipEmailForwardError as e:
if isinstance(e, ZulipEmailForwardUserError):
# TODO: notify sender of error, retry if appropriate.
logging.warning(str(e))
else:
log_and_report(message, str(e), to)
log_and_report(message, e.args[0], to)
def validate_to_address(rcpt_to: str) -> None:
if is_missed_message_address(rcpt_to):

View File

@@ -332,6 +332,6 @@ def do_rest_call(base_url: str,
response_message = ("An exception of type *%s* occurred for message `%s`! "
"See the Zulip server logs for more information." % (
type(e).__name__, event["command"]))
logging.exception(f"Outhook trigger failed:\n {e}")
logging.exception("Outhook trigger failed:")
fail_with_message(event, response_message)
notify_bot_owner(event, exception=e)

View File

@@ -279,8 +279,8 @@ def send_android_push_notification(devices: List[DeviceToken], data: Dict[str, A
priority=priority,
data=data,
retries=10)
except OSError as e:
logger.warning(str(e))
except OSError:
logger.warning("Error while pushing to GCM", exc_info=True)
return
if res and 'success' in res:

View File

@@ -194,9 +194,9 @@ class ExceptionFreeTornadoConnection(pika.adapters.tornado_connection.TornadoCon
super()._adapter_disconnect()
except (pika.exceptions.ProbableAuthenticationError,
pika.exceptions.ProbableAccessDeniedError,
pika.exceptions.IncompatibleProtocolError) as e:
logging.warning("Caught exception '%r' in ExceptionFreeTornadoConnection when \
calling _adapter_disconnect, ignoring", e)
pika.exceptions.IncompatibleProtocolError):
logging.warning("Caught exception in ExceptionFreeTornadoConnection when \
calling _adapter_disconnect, ignoring", exc_info=True)
class TornadoQueueClient(SimpleQueueClient):

View File

@@ -67,8 +67,8 @@ def get_expirable_session_var(session: Session, var_name: str, default_value: An
try:
value, expire_at = (session[var_name]['value'], session[var_name]['expire_at'])
except (KeyError, TypeError) as e:
logging.warning("get_expirable_session_var: Variable %s: %s", var_name, e)
except (KeyError, TypeError):
logging.warning("get_expirable_session_var: error getting %s", var_name, exc_info=True)
return default_value
if timestamp_to_datetime(expire_at) < timezone_now():

View File

@@ -27,7 +27,7 @@ def sync_ldap_user_data(user_profiles: List[UserProfile], deactivation_protectio
sync_user_from_ldap(u, logger)
except ZulipLDAPException as e:
logger.error("Error attempting to update user %s:", u.delivery_email)
logger.error(e)
logger.error(e.args[0])
if deactivation_protection:
if not UserProfile.objects.filter(is_bot=False, is_active=True).exists():

View File

@@ -538,8 +538,11 @@ class InternalPrepTest(ZulipTestCase):
content=bad_content,
)
arg = m.call_args_list[0][0][0]
self.assertIn('Message must not be empty', arg)
m.assert_called_once_with(
"Error queueing internal message by %s: %s",
"cordelia@zulip.com",
"Message must not be empty",
)
with mock.patch('logging.exception') as m:
internal_send_huddle_message(
@@ -549,8 +552,11 @@ class InternalPrepTest(ZulipTestCase):
content=bad_content,
)
arg = m.call_args_list[0][0][0]
self.assertIn('Message must not be empty', arg)
m.assert_called_once_with(
"Error queueing internal message by %s: %s",
"cordelia@zulip.com",
"Message must not be empty",
)
with mock.patch('logging.exception') as m:
internal_send_stream_message(
@@ -561,8 +567,11 @@
stream=stream,
)
arg = m.call_args_list[0][0][0]
self.assertIn('Message must not be empty', arg)
m.assert_called_once_with(
"Error queueing internal message by %s: %s",
"cordelia@zulip.com",
"Message must not be empty",
)
with mock.patch('logging.exception') as m:
internal_send_stream_message_by_name(
@@ -573,8 +582,11 @@
content=bad_content,
)
arg = m.call_args_list[0][0][0]
self.assertIn('Message must not be empty', arg)
m.assert_called_once_with(
"Error queueing internal message by %s: %s",
"cordelia@zulip.com",
"Message must not be empty",
)
def test_error_handling(self) -> None:
realm = get_realm('zulip')
@@ -606,9 +618,11 @@
sender=sender,
recipient_user=recipient_user,
content=content)
arg = logging_mock.call_args_list[0][0][0]
prefix = "Error queueing internal message by cordelia@zulip.com: You can't send private messages outside of your organization."
self.assertTrue(arg.startswith(prefix))
logging_mock.assert_called_once_with(
"Error queueing internal message by %s: %s",
"cordelia@zulip.com",
"You can't send private messages outside of your organization.",
)
def test_ensure_stream_gets_called(self) -> None:
realm = get_realm('zulip')

View File

@@ -1654,7 +1654,7 @@ class GCMSendTest(PushNotificationTest):
mock_gcm: mock.MagicMock) -> None:
mock_gcm.json_request.side_effect = IOError('error')
send_android_push_notification_to_user(self.user_profile, {}, {})
mock_warn.assert_called_with('error')
mock_warn.assert_called_with("Error while pushing to GCM", exc_info=True)
@mock.patch('zerver.lib.push_notifications.logger.warning')
@mock.patch('zerver.lib.push_notifications.logger.info')

View File

@@ -548,7 +548,8 @@ class WorkerTest(ZulipTestCase):
with patch('logging.exception') as logging_exception_mock:
worker.start()
logging_exception_mock.assert_called_once_with(
"Problem handling data on queue unreliable_worker")
"Problem handling data on queue %s", "unreliable_worker",
)
self.assertEqual(processed, ['good', 'fine', 'back to normal'])
with open(fn) as f:
@@ -586,7 +587,8 @@
except AbortLoop:
pass
logging_exception_mock.assert_called_once_with(
"Problem handling data on queue unreliable_loopworker")
"Problem handling data on queue %s", "unreliable_loopworker",
)
self.assertEqual(processed, ['good', 'fine'])
with open(fn) as f:

View File

@@ -489,14 +489,14 @@ def load_event_queues(port: int) -> None:
except FileNotFoundError:
pass
except ValueError:
logging.exception("Tornado %d could not deserialize event queues" % (port,))
logging.exception("Tornado %d could not deserialize event queues", port)
else:
try:
clients = {
qid: ClientDescriptor.from_dict(client) for (qid, client) in data
}
except Exception:
logging.exception("Tornado %d could not deserialize event queues" % (port,))
logging.exception("Tornado %d could not deserialize event queues", port)
for client in clients.values():
# Put code for migrations due to event queue data format changes here

View File

@@ -148,7 +148,7 @@ def retry_send_email_failures(
func(worker, data)
except (smtplib.SMTPServerDisconnected, socket.gaierror, EmailNotDeliveredException):
def on_failure(event: Dict[str, Any]) -> None:
logging.exception(f"Event {event} failed")
logging.exception("Event %r failed", event)
retry_event(worker.queue_name, data, on_failure)
@@ -256,7 +256,7 @@ class QueueProcessingWorker(ABC):
check_and_send_restart_signal()
def _log_problem(self) -> None:
logging.exception(f"Problem handling data on queue {self.queue_name}")
logging.exception("Problem handling data on queue %s", self.queue_name)
def setup(self) -> None:
self.q = SimpleQueueClient()

View File

@@ -1397,7 +1397,7 @@ class SocialAuthMixin(ZulipAuthMixin, ExternalAuthMethod):
# the flow or the IdP is unreliable and returns a bad http response),
# don't throw a 500, just send them back to the
# login page and record the event at the info log level.
self.logger.info(f"{e.__class__.__name__}: {str(e)}")
self.logger.info("%s: %s", e.__class__.__name__, str(e))
return None
except SocialAuthBaseException as e:
# Other python-social-auth exceptions are likely
@@ -1813,9 +1813,9 @@ class SAMLAuthBackend(SocialAuthMixin, SAMLAuth):
try:
resp = OneLogin_Saml2_Response(settings={}, response=SAMLResponse)
issuers = resp.get_issuers()
except cls.SAMLRESPONSE_PARSING_EXCEPTIONS as e:
except cls.SAMLRESPONSE_PARSING_EXCEPTIONS:
logger = logging.getLogger(f"zulip.auth.{cls.name}")
logger.info("Error while parsing SAMLResponse: %s: %s", e.__class__.__name__, e)
logger.info("Error while parsing SAMLResponse:", exc_info=True)
return None
for idp_name, idp_config in settings.SOCIAL_AUTH_SAML_ENABLED_IDPS.items():
@@ -1920,9 +1920,9 @@ class SAMLAuthBackend(SocialAuthMixin, SAMLAuth):
# Call the auth_complete method of SocialAuthMixIn
result = super().auth_complete(*args, **kwargs)
except self.SAMLRESPONSE_PARSING_EXCEPTIONS as e:
except self.SAMLRESPONSE_PARSING_EXCEPTIONS:
# These can be raised if SAMLResponse is missing or badly formatted.
self.logger.info("/complete/saml/: %s: %s", e.__class__.__name__, e)
self.logger.info("/complete/saml/: error while parsing SAMLResponse:", exc_info=True)
# Fall through to returning None.
finally:
if result is None:

View File

@@ -28,7 +28,7 @@ import django
try:
django.setup()
except Exception as e:
except Exception:
# If /etc/zulip/settings.py contains invalid syntax, Django
# initialization will fail in django.setup(). In this case, our
# normal configuration to logs errors to /var/log/zulip/errors.log
@@ -39,7 +39,7 @@ except Exception as e:
logging.basicConfig(filename='/var/log/zulip/errors.log', level=logging.INFO,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger(__name__)
logger.exception(e)
logger.exception("django.setup() failed:")
raise
# Because import_module does not correctly handle safe circular imports we