diff --git a/analytics/lib/counts.py b/analytics/lib/counts.py
index 072326c86d..0033ea61c9 100644
--- a/analytics/lib/counts.py
+++ b/analytics/lib/counts.py
@@ -97,13 +97,13 @@ def process_count_stat(stat: CountStat, fill_to_time: datetime,
         fill_state = FillState.objects.create(property=stat.property,
                                               end_time=currently_filled,
                                               state=FillState.DONE)
-        logger.info("INITIALIZED %s %s" % (stat.property, currently_filled))
+        logger.info("INITIALIZED %s %s", stat.property, currently_filled)
     elif fill_state.state == FillState.STARTED:
-        logger.info("UNDO START %s %s" % (stat.property, fill_state.end_time))
+        logger.info("UNDO START %s %s", stat.property, fill_state.end_time)
         do_delete_counts_at_hour(stat, fill_state.end_time)
         currently_filled = fill_state.end_time - time_increment
         do_update_fill_state(fill_state, currently_filled, FillState.DONE)
-        logger.info("UNDO DONE %s" % (stat.property,))
+        logger.info("UNDO DONE %s", stat.property)
     elif fill_state.state == FillState.DONE:
         currently_filled = fill_state.end_time
     else:
@@ -113,21 +113,21 @@ def process_count_stat(stat: CountStat, fill_to_time: datetime,
         for dependency in stat.dependencies:
             dependency_fill_time = last_successful_fill(dependency)
             if dependency_fill_time is None:
-                logger.warning("DependentCountStat %s run before dependency %s." %
-                               (stat.property, dependency))
+                logger.warning("DependentCountStat %s run before dependency %s.",
+                               stat.property, dependency)
                 return
             fill_to_time = min(fill_to_time, dependency_fill_time)

     currently_filled = currently_filled + time_increment
     while currently_filled <= fill_to_time:
-        logger.info("START %s %s" % (stat.property, currently_filled))
+        logger.info("START %s %s", stat.property, currently_filled)
         start = time.time()
         do_update_fill_state(fill_state, currently_filled, FillState.STARTED)
         do_fill_count_stat_at_hour(stat, currently_filled, realm)
         do_update_fill_state(fill_state, currently_filled, FillState.DONE)
         end = time.time()
         currently_filled = currently_filled + time_increment
-        logger.info("DONE %s (%dms)" % (stat.property, (end-start)*1000))
+        logger.info("DONE %s (%dms)", stat.property, (end-start)*1000)

 def do_update_fill_state(fill_state: FillState, end_time: datetime, state: int) -> None:
     fill_state.end_time = end_time
@@ -142,8 +142,8 @@ def do_fill_count_stat_at_hour(stat: CountStat, end_time: datetime, realm: Optio
         timer = time.time()
         assert(stat.data_collector.pull_function is not None)
         rows_added = stat.data_collector.pull_function(stat.property, start_time, end_time, realm)
-        logger.info("%s run pull_function (%dms/%sr)" %
-                    (stat.property, (time.time()-timer)*1000, rows_added))
+        logger.info("%s run pull_function (%dms/%sr)",
+                    stat.property, (time.time()-timer)*1000, rows_added)
     do_aggregate_to_summary_table(stat, end_time, realm)

 def do_delete_counts_at_hour(stat: CountStat, end_time: datetime) -> None:
@@ -190,8 +190,10 @@ def do_aggregate_to_summary_table(stat: CountStat, end_time: datetime,
     start = time.time()
     cursor.execute(realmcount_query, {'end_time': end_time})
     end = time.time()
-    logger.info("%s RealmCount aggregation (%dms/%sr)" % (
-        stat.property, (end - start) * 1000, cursor.rowcount))
+    logger.info(
+        "%s RealmCount aggregation (%dms/%sr)",
+        stat.property, (end - start) * 1000, cursor.rowcount,
+    )

     if realm is None:
         # Aggregate into InstallationCount. Only run if we just
@@ -213,8 +215,10 @@ def do_aggregate_to_summary_table(stat: CountStat, end_time: datetime,
         start = time.time()
         cursor.execute(installationcount_query, {'end_time': end_time})
         end = time.time()
-        logger.info("%s InstallationCount aggregation (%dms/%sr)" % (
-            stat.property, (end - start) * 1000, cursor.rowcount))
+        logger.info(
+            "%s InstallationCount aggregation (%dms/%sr)",
+            stat.property, (end - start) * 1000, cursor.rowcount,
+        )

     cursor.close()
diff --git a/analytics/management/commands/analyze_mit.py b/analytics/management/commands/analyze_mit.py
index dcef41567c..4ca642d0a1 100644
--- a/analytics/management/commands/analyze_mit.py
+++ b/analytics/management/commands/analyze_mit.py
@@ -43,7 +43,7 @@ def compute_stats(log_level: int) -> None:
             total_counts[client_name] += count
             total_user_counts[email] += count

-    logging.debug("%40s | %10s | %s" % ("User", "Messages", "Percentage Zulip"))
+    logging.debug("%40s | %10s | %s", "User", "Messages", "Percentage Zulip")
     top_percents: Dict[int, float] = {}
     for size in [10, 25, 50, 100, 200, len(total_user_counts.keys())]:
         top_percents[size] = 0.0
@@ -56,18 +56,18 @@ def compute_stats(log_level: int) -> None:
             if i < size:
                 top_percents[size] += (percent_zulip * 1.0 / size)

-        logging.debug("%40s | %10s | %s%%" % (email, total_user_counts[email],
-                                              percent_zulip))
+        logging.debug("%40s | %10s | %s%%", email, total_user_counts[email],
+                      percent_zulip)

     logging.info("")
     for size in sorted(top_percents.keys()):
-        logging.info("Top %6s | %s%%" % (size, round(top_percents[size], 1)))
+        logging.info("Top %6s | %s%%", size, round(top_percents[size], 1))

     grand_total = sum(total_counts.values())
     print(grand_total)
-    logging.info("%15s | %s" % ("Client", "Percentage"))
+    logging.info("%15s | %s", "Client", "Percentage")
     for client in total_counts.keys():
-        logging.info("%15s | %s%%" % (client, round(100. * total_counts[client] / grand_total, 1)))
+        logging.info("%15s | %s%%", client, round(100. * total_counts[client] / grand_total, 1))

 class Command(BaseCommand):
     help = "Compute statistics on MIT Zephyr usage."
diff --git a/analytics/views.py b/analytics/views.py
index c240993ccd..53611f1667 100644
--- a/analytics/views.py
+++ b/analytics/views.py
@@ -258,7 +258,7 @@ def get_chart_data(request: HttpRequest, user_profile: UserProfile, chart_name:
         logging.warning("User from realm %s attempted to access /stats, but the computed "
                         "start time: %s (creation of realm or installation) is later than the computed "
                         "end time: %s (last successful analytics update). Is the "
-                        "analytics cron job running?" % (realm.string_id, start, end))
+                        "analytics cron job running?", realm.string_id, start, end)
         raise JsonableError(_("No analytics data available. Please contact your server administrator."))

     assert len({stat.frequency for stat in stats}) == 1
diff --git a/scripts/lib/setup_venv.py b/scripts/lib/setup_venv.py
index 44b10da42a..8b26ccd3bb 100644
--- a/scripts/lib/setup_venv.py
+++ b/scripts/lib/setup_venv.py
@@ -211,7 +211,7 @@ def try_to_copy_venv(venv_path: str, new_packages: Set[str]) -> bool:
     except subprocess.CalledProcessError:
         # Virtualenv-clone is either not installed or threw an
         # error. Just return False: making a new venv is safe.
-        logging.warning("Error cloning virtualenv %s" % (source_venv_path,))
+        logging.warning("Error cloning virtualenv %s", source_venv_path)
         return False

     # virtualenv-clone, unfortunately, copies the success stamp,
diff --git a/scripts/lib/zulip_tools.py b/scripts/lib/zulip_tools.py
index 2c85fb864f..f7cd132285 100755
--- a/scripts/lib/zulip_tools.py
+++ b/scripts/lib/zulip_tools.py
@@ -210,7 +210,7 @@ def log_management_command(cmd: str, log_path: str) -> None:
     logger.addHandler(file_handler)
     logger.setLevel(logging.INFO)

-    logger.info("Ran '%s'" % (cmd,))
+    logger.info("Ran '%s'", cmd)

 def get_environment() -> str:
     if os.path.exists(DEPLOYMENTS_DIR):
diff --git a/tools/lib/provision.py b/tools/lib/provision.py
index cad8d47504..663b67d93a 100755
--- a/tools/lib/provision.py
+++ b/tools/lib/provision.py
@@ -95,7 +95,7 @@ elif vendor == "rhel" and os_version.startswith("7."):
 elif vendor == "centos" and os_version == "7":
     POSTGRES_VERSION = "10"
 else:
-    logging.critical("Unsupported platform: {} {}".format(vendor, os_version))
+    logging.critical("Unsupported platform: %s %s", vendor, os_version)
     if vendor == 'ubuntu' and os_version == '14.04':
         print()
         print("Ubuntu Trusty reached end-of-life upstream and is no longer a supported platform for Zulip")
diff --git a/tools/semgrep.yml b/tools/semgrep.yml
index 3bc4543241..41877b2fc0 100644
--- a/tools/semgrep.yml
+++ b/tools/semgrep.yml
@@ -49,3 +49,29 @@ rules:
         - path-not: 'zerver/migrations/0209_user_profile_no_empty_password.py'
         - path-not: 'zerver/migrations/0260_missed_message_addresses_from_redis_to_db.py'
         - path-not: 'pgroonga/migrations/0002_html_escape_subject.py'
+
+  - id: logging-format
+    languages: [python]
+    pattern-either:
+      - pattern: logging.debug(... % ...)
+      - pattern: logging.debug(... .format(...))
+      - pattern: logger.debug(... % ...)
+      - pattern: logger.debug(... .format(...))
+      - pattern: logging.info(... % ...)
+      - pattern: logging.info(... .format(...))
+      - pattern: logger.info(... % ...)
+      - pattern: logger.info(... .format(...))
+      - pattern: logging.warning(... % ...)
+      - pattern: logging.warning(... .format(...))
+      - pattern: logger.warning(... % ...)
+      - pattern: logger.warning(... .format(...))
+      - pattern: logging.error(... % ...)
+      - pattern: logging.error(... .format(...))
+      - pattern: logger.error(... % ...)
+      - pattern: logger.error(... .format(...))
+      - pattern: logging.critical(... % ...)
+      - pattern: logging.critical(... .format(...))
+      - pattern: logger.critical(... % ...)
+      - pattern: logger.critical(... .format(...))
+    severity: ERROR
+    message: "Pass format arguments to logging (https://docs.python.org/3/howto/logging.html#optimization)"
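The rule above flags messages interpolated eagerly with % or str.format before logging is even consulted; the conversions throughout this diff instead pass the values as separate arguments, so the logging machinery interpolates them only if some handler actually emits the record (the optimization the rule's message links to). A minimal sketch of the difference, with an illustrative logger and values that are not from this codebase:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("demo")
    dump_file_id = 7  # illustrative value

    # Flagged by the rule: the string is built before logger.debug() runs,
    # paying the formatting cost even though DEBUG is disabled here.
    logger.debug("Writing messages-%06d.json" % (dump_file_id,))

    # Accepted: the format string and arguments ride on the LogRecord and are
    # combined lazily, only when a handler emits the record; keeping the format
    # string constant also lets log aggregators group identical events.
    logger.debug("Writing messages-%06d.json", dump_file_id)
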
diff --git a/zerver/data_import/gitter.py b/zerver/data_import/gitter.py
index d8f5e546ef..fc13a15906 100644
--- a/zerver/data_import/gitter.py
+++ b/zerver/data_import/gitter.py
@@ -199,7 +199,7 @@ def convert_gitter_workspace_messages(gitter_data: GitterDataT, output_dir: str,
         message_json['zerver_message'] = zerver_message
         message_json['zerver_usermessage'] = zerver_usermessage
         message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
-        logging.info("Writing Messages to %s\n" % (message_filename,))
+        logging.info("Writing Messages to %s\n", message_filename)
         write_data_to_file(os.path.join(message_filename), message_json)

         low_index = upper_index
@@ -216,8 +216,8 @@ def get_usermentions(message: Dict[str, Any], user_map: Dict[str, int],
             if mention.get('userId') in user_map:
                 gitter_mention = '@%s' % (mention['screenName'],)
                 if mention['screenName'] not in user_short_name_to_full_name:
-                    logging.info("Mentioned user %s never sent any messages, so has no full name data" %
-                                 (mention['screenName'],))
+                    logging.info("Mentioned user %s never sent any messages, so has no full name data",
+                                 mention['screenName'])
                     full_name = mention['screenName']
                 else:
                     full_name = user_short_name_to_full_name[mention['screenName']]
@@ -278,7 +278,7 @@ def do_convert_data(gitter_data_file: str, output_dir: str, threads: int=6) -> N
     subprocess.check_call(["tar", "-czf", output_dir + '.tar.gz', output_dir, '-P'])

     logging.info('######### DATA CONVERSION FINISHED #########\n')
-    logging.info("Zulip data dump created at %s" % (output_dir,))
+    logging.info("Zulip data dump created at %s", output_dir)

 def write_data_to_file(output_file: str, data: Any) -> None:
     with open(output_file, "w") as f:
diff --git a/zerver/data_import/hipchat.py b/zerver/data_import/hipchat.py
index 9fd95500e8..2c90d7301e 100755
--- a/zerver/data_import/hipchat.py
+++ b/zerver/data_import/hipchat.py
@@ -70,14 +70,14 @@ def untar_input_file(tar_file: str) -> str:
     data_dir = os.path.abspath(data_dir)

     if os.path.exists(data_dir):
-        logging.info('input data was already untarred to %s, we will use it' % (data_dir,))
+        logging.info('input data was already untarred to %s, we will use it', data_dir)
         return data_dir

     os.makedirs(data_dir)

     subprocess.check_call(['tar', '-xf', tar_file, '-C', data_dir])

-    logging.info('input data was untarred to %s' % (data_dir,))
+    logging.info('input data was untarred to %s', data_dir)

     return data_dir

@@ -696,7 +696,7 @@ def process_raw_message_batch(realm_id: int,
             content = h.handle(content)

             if len(content) > 10000:
-                logging.info('skipping too-long message of length %s' % (len(content),))
+                logging.info('skipping too-long message of length %s', len(content))
                 continue

             date_sent = raw_message['date_sent']
diff --git a/zerver/data_import/import_util.py b/zerver/data_import/import_util.py
index 51ebf6041b..a446fa14ea 100644
--- a/zerver/data_import/import_util.py
+++ b/zerver/data_import/import_util.py
@@ -572,7 +572,7 @@ def write_avatar_png(avatar_folder: str,
 ListJobData = TypeVar('ListJobData')
 def run_parallel_wrapper(f: Callable[[ListJobData], None], full_items: List[ListJobData],
                          threads: int=6) -> Iterable[Tuple[int, List[ListJobData]]]:
-    logging.info("Distributing %s items across %s threads" % (len(full_items), threads))
+    logging.info("Distributing %s items across %s threads", len(full_items), threads)

     def wrapping_function(items: List[ListJobData]) -> int:
         count = 0
@@ -580,11 +580,11 @@ def run_parallel_wrapper(f: Callable[[ListJobData], None], full_items: List[List
             try:
                 f(item)
             except Exception:
-                logging.info("Error processing item: %s" % (item,))
+                logging.info("Error processing item: %s", item)
                 traceback.print_exc()
             count += 1
             if count % 1000 == 0:
-                logging.info("A download thread finished %s items" % (count,))
+                logging.info("A download thread finished %s items", count)
         return 0
     job_lists: List[List[ListJobData]] = [full_items[i::threads] for i in range(threads)]
     return run_parallel(wrapping_function, job_lists, threads=threads)
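run_parallel_wrapper above deals the work out round-robin: job list i takes items i, i+threads, i+2*threads, and so on, rather than contiguous blocks. A small self-contained illustration of that slicing (the values are made up):

    full_items = list(range(10))
    threads = 3

    # Each job list takes every `threads`-th item, starting at offset i.
    job_lists = [full_items[i::threads] for i in range(threads)]
    print(job_lists)  # [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]
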
diff --git a/zerver/data_import/mattermost.py b/zerver/data_import/mattermost.py
index 4af9b3f0bf..26ec42c4d2 100644
--- a/zerver/data_import/mattermost.py
+++ b/zerver/data_import/mattermost.py
@@ -330,7 +330,7 @@ def process_raw_message_batch(realm_id: int,
             content = h.handle(content)

             if len(content) > 10000:  # nocoverage
-                logging.info('skipping too-long message of length %s' % (len(content),))
+                logging.info('skipping too-long message of length %s', len(content))
                 continue

             date_sent = raw_message['date_sent']
diff --git a/zerver/data_import/slack.py b/zerver/data_import/slack.py
index 14a05510a2..038eb78fca 100755
--- a/zerver/data_import/slack.py
+++ b/zerver/data_import/slack.py
@@ -205,7 +205,7 @@ def users_to_zerver_userprofile(slack_data_dir: str, users: List[ZerverFieldsT],
         if not user.get('is_primary_owner', False):
             user_id_count += 1

-        logging.info("{} -> {}".format(user['name'], userprofile_dict['email']))
+        logging.info("%s -> %s", user['name'], userprofile_dict['email'])

     process_customprofilefields(zerver_customprofilefield, zerver_customprofilefield_values)
     logging.info('######### IMPORTING USERS FINISHED #########\n')
@@ -397,7 +397,7 @@ def channels_to_zerver_stream(slack_data_dir: str, realm_id: int,
             stream_id_count += 1
             recipient_id_count += 1

-            logging.info("{} -> created".format(channel['name']))
+            logging.info("%s -> created", channel['name'])

             # TODO map Slack's pins to Zulip's stars
             # There is the security model that Slack's pins are known to the team owner
@@ -443,7 +443,7 @@ def channels_to_zerver_stream(slack_data_dir: str, realm_id: int,
             huddle_id_count += 1
             recipient_id_count += 1

-            logging.info("{} -> created".format(mpim['name']))
+            logging.info("%s -> created", mpim['name'])

         try:
             mpims = get_data_file(slack_data_dir + '/mpims.json')
@@ -595,7 +595,7 @@ def convert_slack_workspace_messages(slack_data_dir: str, users: List[ZerverFiel
             zerver_usermessage=zerver_usermessage)

         message_file = "/messages-%06d.json" % (dump_file_id,)
-        logging.info("Writing Messages to %s\n" % (output_dir + message_file,))
+        logging.info("Writing Messages to %s\n", output_dir + message_file)
         create_converted_data_files(message_json, output_dir, message_file)

         total_reactions += reactions
@@ -796,8 +796,10 @@ def channel_message_to_zerver_message(realm_id: int,
         total_user_messages += num_created
         total_skipped_user_messages += num_skipped

-    logging.debug("Created %s UserMessages; deferred %s due to long-term idle" % (
-        total_user_messages, total_skipped_user_messages))
+    logging.debug(
+        "Created %s UserMessages; deferred %s due to long-term idle",
+        total_user_messages, total_skipped_user_messages,
+    )
     return zerver_message, zerver_usermessage, zerver_attachment, uploads_list, \
         reaction_list
@@ -1091,7 +1093,7 @@ def do_convert_data(slack_zip_file: str, output_dir: str, token: str, threads: i
     subprocess.check_call(["tar", "-czf", output_dir + '.tar.gz', output_dir, '-P'])

     logging.info('######### DATA CONVERSION FINISHED #########\n')
-    logging.info("Zulip data dump created at %s" % (output_dir,))
+    logging.info("Zulip data dump created at %s", output_dir)

 def get_data_file(path: str) -> Any:
     with open(path) as fp:
diff --git a/zerver/decorator.py b/zerver/decorator.py
index ea72e257ed..b6c2796876 100644
--- a/zerver/decorator.py
+++ b/zerver/decorator.py
@@ -224,8 +224,10 @@ def validate_account_and_subdomain(request: HttpRequest, user_profile: UserProfi
             not (settings.RUNNING_INSIDE_TORNADO and
                  request.META["SERVER_NAME"] == "127.0.0.1" and
                  request.META["REMOTE_ADDR"] == "127.0.0.1")):
-        logging.warning("User %s (%s) attempted to access API on wrong subdomain (%s)" % (
-            user_profile.delivery_email, user_profile.realm.subdomain, get_subdomain(request)))
+        logging.warning(
+            "User %s (%s) attempted to access API on wrong subdomain (%s)",
+            user_profile.delivery_email, user_profile.realm.subdomain, get_subdomain(request),
+        )
         raise JsonableError(_("Account is not associated with this subdomain"))

 def access_user_by_api_key(request: HttpRequest, api_key: str, email: Optional[str]=None) -> UserProfile:
@@ -772,8 +774,8 @@ def rate_limit(domain: str='api_by_user') -> Callable[[ViewFuncT], ViewFuncT]:
                 user = None

             if not user:  # nocoverage # See comments below
-                logging.error("Requested rate-limiting on %s but user is not authenticated!" %
-                              (func.__name__,))
+                logging.error("Requested rate-limiting on %s but user is not authenticated!",
+                              func.__name__)
                 return func(request, *args, **kwargs)

             if isinstance(user, AnonymousUser):  # nocoverage
diff --git a/zerver/forms.py b/zerver/forms.py
index 31c6b15f0a..13a5325dfb 100644
--- a/zerver/forms.py
+++ b/zerver/forms.py
@@ -244,7 +244,7 @@ class ZulipPasswordResetForm(PasswordResetForm):
         realm = get_realm(get_subdomain(request))

         if not email_auth_enabled(realm):
-            logging.info("Password reset attempted for %s even though password auth is disabled." % (email,))
+            logging.info("Password reset attempted for %s even though password auth is disabled.", email)
             return
         if email_belongs_to_ldap(realm, email):
             # TODO: Ideally, we'd provide a user-facing error here
@@ -261,7 +261,7 @@ class ZulipPasswordResetForm(PasswordResetForm):
             rate_limit_password_reset_form_by_email(email)
         except RateLimited:
             # TODO: Show an informative, user-facing error message.
-            logging.info("Too many password reset attempts for email %s" % (email,))
+            logging.info("Too many password reset attempts for email %s", email)
             return

         user: Optional[UserProfile] = None
@@ -329,8 +329,8 @@ class OurAuthenticationForm(AuthenticationForm):
             try:
                 realm = get_realm(subdomain)
             except Realm.DoesNotExist:
-                logging.warning("User %s attempted to password login to nonexistent subdomain %s" %
-                                (username, subdomain))
+                logging.warning("User %s attempted to password login to nonexistent subdomain %s",
+                                username, subdomain)
                 raise ValidationError("Realm does not exist")

             return_data: Dict[str, Any] = {}
@@ -351,8 +351,8 @@ class OurAuthenticationForm(AuthenticationForm):
                 raise ValidationError(mark_safe(DEACTIVATED_ACCOUNT_ERROR))

             if return_data.get("invalid_subdomain"):
-                logging.warning("User %s attempted to password login to wrong subdomain %s" %
-                                (username, subdomain))
+                logging.warning("User %s attempted to password login to wrong subdomain %s",
+                                username, subdomain)
                 raise ValidationError(mark_safe(WRONG_SUBDOMAIN_ERROR))

         if self.user_cache is None:
diff --git a/zerver/lib/actions.py b/zerver/lib/actions.py
index b2e8b640e9..e6d70c42f5 100644
--- a/zerver/lib/actions.py
+++ b/zerver/lib/actions.py
@@ -1219,8 +1219,9 @@ def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[i
             queue_name = 'embedded_bots'
         else:
             logging.error(
-                'Unexpected bot_type for Service bot id=%s: %s' %
-                (user_profile_id, bot_type))
+                'Unexpected bot_type for Service bot id=%s: %s',
+                user_profile_id, bot_type,
+            )
             return

         is_stream = (recipient_type == Recipient.STREAM)
@@ -5420,8 +5421,10 @@ def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> boo
             #
             # Either case is unusual and suggests a UI bug that got
             # the user in this situation, so we log in these cases.
-            logging.warning("User %s tried to share upload %s in message %s, but lacks permission" % (
-                user_profile.id, path_id, message.id))
+            logging.warning(
+                "User %s tried to share upload %s in message %s, but lacks permission",
+                user_profile.id, path_id, message.id,
+            )
             continue

         claimed = True
diff --git a/zerver/lib/cache.py b/zerver/lib/cache.py
index 38545be7f4..9bbb3c6386 100644
--- a/zerver/lib/cache.py
+++ b/zerver/lib/cache.py
@@ -198,7 +198,7 @@ class InvalidCacheKeyException(Exception):

 def log_invalid_cache_keys(stack_trace: str, key: List[str]) -> None:
     logger.warning(
-        "Invalid cache key used: {}\nStack trace: {}\n".format(key, stack_trace)
+        "Invalid cache key used: %s\nStack trace: %s\n", key, stack_trace,
     )

 def validate_cache_key(key: str) -> None:
diff --git a/zerver/lib/cache_helpers.py b/zerver/lib/cache_helpers.py
index 8bdaa775ff..5dd6cc8953 100644
--- a/zerver/lib/cache_helpers.py
+++ b/zerver/lib/cache_helpers.py
@@ -133,6 +133,6 @@ def fill_remote_cache(cache: str) -> None:
             cache_set_many(items_for_remote_cache, timeout=3600*24)
             items_for_remote_cache = {}
     cache_set_many(items_for_remote_cache, timeout=3600*24*7)
-    logging.info("Successfully populated %s cache! Consumed %s remote cache queries (%s time)" %
-                 (cache, get_remote_cache_requests() - remote_cache_requests_start,
-                  round(get_remote_cache_time() - remote_cache_time_start, 2)))
+    logging.info("Successfully populated %s cache! Consumed %s remote cache queries (%s time)",
+                 cache, get_remote_cache_requests() - remote_cache_requests_start,
+                 round(get_remote_cache_time() - remote_cache_time_start, 2))
diff --git a/zerver/lib/debug.py b/zerver/lib/debug.py
index a49c60b674..d3790a2992 100644
--- a/zerver/lib/debug.py
+++ b/zerver/lib/debug.py
@@ -40,8 +40,8 @@ def interactive_debug_listen() -> None:

 def tracemalloc_dump() -> None:
     if not tracemalloc.is_tracing():
-        logger.warning("pid {}: tracemalloc off, nothing to dump"
-                       .format(os.getpid()))
+        logger.warning("pid %s: tracemalloc off, nothing to dump",
+                       os.getpid())
         return
     # Despite our name for it, `timezone_now` always deals in UTC.
     basename = "snap.{}.{}".format(os.getpid(),
@@ -55,15 +55,15 @@ def tracemalloc_dump() -> None:
     with open('/proc/{}/stat'.format(os.getpid()), 'rb') as f:
         procstat = f.read().split()
     rss_pages = int(procstat[23])
-    logger.info("tracemalloc dump: tracing {} MiB ({} MiB peak), using {} MiB; rss {} MiB; dumped {}"
-                .format(tracemalloc.get_traced_memory()[0] // 1048576,
-                        tracemalloc.get_traced_memory()[1] // 1048576,
-                        tracemalloc.get_tracemalloc_memory() // 1048576,
-                        rss_pages // 256,
-                        basename))
+    logger.info("tracemalloc dump: tracing %s MiB (%s MiB peak), using %s MiB; rss %s MiB; dumped %s",
+                tracemalloc.get_traced_memory()[0] // 1048576,
+                tracemalloc.get_traced_memory()[1] // 1048576,
+                tracemalloc.get_tracemalloc_memory() // 1048576,
+                rss_pages // 256,
+                basename)

 def tracemalloc_listen_sock(sock: socket.socket) -> None:
-    logger.debug('pid {}: tracemalloc_listen_sock started!'.format(os.getpid()))
+    logger.debug('pid %s: tracemalloc_listen_sock started!', os.getpid())
     while True:
         sock.recv(1)
         tracemalloc_dump()
@@ -75,7 +75,7 @@ def tracemalloc_listen() -> None:
     if listener_pid == os.getpid():
         # Already set up -- and in this process, not just its parent.
         return
-    logger.debug('pid {}: tracemalloc_listen working...'.format(os.getpid()))
+    logger.debug('pid %s: tracemalloc_listen working...', os.getpid())
     listener_pid = os.getpid()

     sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
@@ -84,8 +84,7 @@ def tracemalloc_listen() -> None:
     thread = threading.Thread(target=lambda: tracemalloc_listen_sock(sock),
                               daemon=True)
     thread.start()
-    logger.debug('pid {}: tracemalloc_listen done: {}'.format(
-        os.getpid(), path))
+    logger.debug('pid %s: tracemalloc_listen done: %s', os.getpid(), path)

 def maybe_tracemalloc_listen() -> None:
     '''If tracemalloc tracing enabled, listen for requests to dump a snapshot.
diff --git a/zerver/lib/digest.py b/zerver/lib/digest.py
index 58ca1e1901..4b60d8f87f 100644
--- a/zerver/lib/digest.py
+++ b/zerver/lib/digest.py
@@ -70,8 +70,10 @@ def enqueue_emails(cutoff: datetime.datetime) -> None:
         for user_profile in user_profiles:
             if inactive_since(user_profile, cutoff):
                 queue_digest_recipient(user_profile, cutoff)
-                logger.info("User %s is inactive, queuing for potential digest" % (
-                    user_profile.id,))
+                logger.info(
+                    "User %s is inactive, queuing for potential digest",
+                    user_profile.id,
+                )

 def gather_hot_conversations(user_profile: UserProfile, messages: List[Message]) -> List[Dict[str, Any]]:
     # Gather stream conversations of 2 types:
@@ -209,7 +211,7 @@ def handle_digest_email(user_profile_id: int, cutoff: float,
     # We don't want to send emails containing almost no information.
     if enough_traffic(context["hot_conversations"], new_streams_count):
-        logger.info("Sending digest email for user %s" % (user_profile.id,))
+        logger.info("Sending digest email for user %s", user_profile.id)
         # Send now, as a ScheduledEmail
         send_future_email('zerver/emails/digest', user_profile.realm, to_user_ids=[user_profile.id],
                           from_name="Zulip Digest", from_address=FromAddress.no_reply_placeholder,
diff --git a/zerver/lib/email_mirror.py b/zerver/lib/email_mirror.py
index c3f38a9a33..0e4d46e649 100644
--- a/zerver/lib/email_mirror.py
+++ b/zerver/lib/email_mirror.py
@@ -200,7 +200,7 @@ def extract_body(message: message.Message, include_quotes: bool=False, prefer_te
         html_content = extract_html_body(message, include_quotes)

     if plaintext_content is None and html_content is None:
-        logging.warning("Content types: %s" % ([part.get_content_type() for part in message.walk()],))
+        logging.warning("Content types: %s", [part.get_content_type() for part in message.walk()])
         raise ZulipEmailForwardUserError("Unable to find plaintext or HTML message body")
     if not plaintext_content and not html_content:
         raise ZulipEmailForwardUserError("Email has no nonempty body sections; ignoring.")
@@ -282,8 +282,8 @@ def extract_and_upload_attachments(message: message.Message, realm: Realm) -> st
                 formatted_link = "[%s](%s)" % (filename, s3_url)
                 attachment_links.append(formatted_link)
             else:
-                logger.warning("Payload is not bytes (invalid attachment %s in message from %s)." %
-                               (filename, message.get("From")))
+                logger.warning("Payload is not bytes (invalid attachment %s in message from %s).",
+                               filename, message.get("From"))

     return '\n'.join(attachment_links)

@@ -343,8 +343,10 @@ def process_stream_message(to: str, message: message.Message) -> None:
     send_zulip(
         get_system_bot(settings.EMAIL_GATEWAY_BOT),
         stream, subject, body)
-    logger.info("Successfully processed email to %s (%s)" % (
-        stream.name, stream.realm.string_id))
+    logger.info(
+        "Successfully processed email to %s (%s)",
+        stream.name, stream.realm.string_id,
+    )

 def process_missed_message(to: str, message: message.Message) -> None:
     mm_address = get_usable_missed_message_address(to)
@@ -389,8 +391,10 @@ def process_missed_message(to: str, message: message.Message) -> None:
     else:
         raise AssertionError("Invalid recipient type!")

-    logger.info("Successfully processed email from user %s to %s" % (
-        user_profile.id, recipient_str))
+    logger.info(
+        "Successfully processed email from user %s to %s",
+        user_profile.id, recipient_str,
+    )

 def process_message(message: message.Message, rcpt_to: Optional[str]=None) -> None:
     to: Optional[str] = None
diff --git a/zerver/lib/export.py b/zerver/lib/export.py
index 7e185a7440..e7256c68bf 100644
--- a/zerver/lib/export.py
+++ b/zerver/lib/export.py
@@ -301,7 +301,7 @@ def sanity_check_output(data: TableData) -> None:

     for table in tables:
         if table not in data:
-            logging.warning('??? NO DATA EXPORTED FOR TABLE %s!!!' % (table,))
+            logging.warning('??? NO DATA EXPORTED FOR TABLE %s!!!', table)

 def write_data_to_file(output_file: Path, data: Any) -> None:
     with open(output_file, "w") as f:
@@ -451,7 +451,7 @@ def export_from_config(response: TableData, config: Config, seed_object: Optiona
         exported_tables = config.custom_tables

     for t in exported_tables:
-        logging.info('Exporting via export_from_config: %s' % (t,))
+        logging.info('Exporting via export_from_config: %s', t)

     rows = None
     if config.is_seeded:
@@ -476,7 +476,7 @@ def export_from_config(response: TableData, config: Config, seed_object: Optiona
         for t in config.concat_and_destroy:
             data += response[t]
             del response[t]
-            logging.info('Deleted temporary %s' % (t,))
+            logging.info('Deleted temporary %s', t)
         assert table is not None
         response[table] = data
@@ -945,7 +945,7 @@ def fetch_usermessages(realm: Realm,
             user_message_obj['flags_mask'] = user_message.flags.mask
             del user_message_obj['flags']
             user_message_chunk.append(user_message_obj)
-    logging.info("Fetched UserMessages for %s" % (message_filename,))
+    logging.info("Fetched UserMessages for %s", message_filename)
     return user_message_chunk

 def export_usermessages_batch(input_path: Path, output_path: Path,
@@ -968,7 +968,7 @@ def export_usermessages_batch(input_path: Path, output_path: Path,

 def write_message_export(message_filename: Path, output: MessageOutput) -> None:
     write_data_to_file(output_file=message_filename, data=output)
-    logging.info("Dumped to %s" % (message_filename,))
+    logging.info("Dumped to %s", message_filename)

 def export_partial_message_files(realm: Realm,
                                  response: TableData,
@@ -1107,7 +1107,7 @@ def write_message_partial_for_query(realm: Realm, message_query: Any, dump_file_
     # Figure out the name of our shard file.
     message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
     message_filename += '.partial'
-    logging.info("Fetched Messages for %s" % (message_filename,))
+    logging.info("Fetched Messages for %s", message_filename)

     # Clean up our messages.
     table_data: TableData = {}
@@ -1251,7 +1251,7 @@ def export_files_from_s3(realm: Realm, bucket_name: str, output_dir: Path,
     bucket = conn.get_bucket(bucket_name, validate=True)
     records = []

-    logging.info("Downloading uploaded files from %s" % (bucket_name,))
+    logging.info("Downloading uploaded files from %s", bucket_name)

     avatar_hash_values = set()
     user_ids = set()
@@ -1293,7 +1293,7 @@ def export_files_from_s3(realm: Realm, bucket_name: str, output_dir: Path,

         count += 1
         if (count % 100 == 0):
-            logging.info("Finished %s" % (count,))
+            logging.info("Finished %s", count)

     with open(os.path.join(output_dir, "records.json"), "w") as records_file:
         ujson.dump(records, records_file, indent=4)
@@ -1321,7 +1321,7 @@ def export_uploads_from_local(realm: Realm, local_dir: Path, output_dir: Path) -

         count += 1
         if (count % 100 == 0):
-            logging.info("Finished %s" % (count,))
+            logging.info("Finished %s", count)

     with open(os.path.join(output_dir, "records.json"), "w") as records_file:
         ujson.dump(records, records_file, indent=4)
@@ -1344,8 +1344,10 @@ def export_avatars_from_local(realm: Realm, local_dir: Path, output_dir: Path) -

         wildcard = os.path.join(local_dir, avatar_path + '.*')
         for local_path in glob.glob(wildcard):
-            logging.info('Copying avatar file for user %s from %s' % (
-                user.email, local_path))
+            logging.info(
+                'Copying avatar file for user %s from %s',
+                user.email, local_path,
+            )
             fn = os.path.relpath(local_path, local_dir)
             output_path = os.path.join(output_dir, fn)
             os.makedirs(str(os.path.dirname(output_path)), exist_ok=True)
@@ -1364,7 +1366,7 @@ def export_avatars_from_local(realm: Realm, local_dir: Path, output_dir: Path) -

         count += 1
         if (count % 100 == 0):
-            logging.info("Finished %s" % (count,))
+            logging.info("Finished %s", count)

     with open(os.path.join(output_dir, "records.json"), "w") as records_file:
         ujson.dump(records, records_file, indent=4)
@@ -1416,7 +1418,7 @@ def export_emoji_from_local(realm: Realm, local_dir: Path, output_dir: Path) ->

         count += 1
         if (count % 100 == 0):
-            logging.info("Finished %s" % (count,))
+            logging.info("Finished %s", count)

     with open(os.path.join(output_dir, "records.json"), "w") as records_file:
         ujson.dump(records, records_file, indent=4)
@@ -1428,7 +1430,7 @@ def do_write_stats_file_for_realm_export(output_dir: Path) -> None:
     message_files = glob.glob(os.path.join(output_dir, 'messages-*.json'))
     fns = sorted([analytics_file] + [attachment_file] + message_files + [realm_file])

-    logging.info('Writing stats file: %s\n' % (stats_file,))
+    logging.info('Writing stats file: %s\n', stats_file)
     with open(stats_file, 'w') as f:
         for fn in fns:
             f.write(os.path.basename(fn) + '\n')
@@ -1487,7 +1489,7 @@ def do_export_realm(realm: Realm, output_dir: Path, threads: int,
     message_ids = export_partial_message_files(realm, response, output_dir=output_dir,
                                                public_only=public_only,
                                                consent_message_id=consent_message_id)
-    logging.info('%d messages were exported' % (len(message_ids),))
+    logging.info('%d messages were exported', len(message_ids))

     # zerver_reaction
     zerver_reaction: TableData = {}
@@ -1497,7 +1499,7 @@ def do_export_realm(realm: Realm, output_dir: Path, threads: int,
     # Write realm data
     export_file = os.path.join(output_dir, "realm.json")
     write_data_to_file(output_file=export_file, data=response)
-    logging.info('Writing realm data to %s' % (export_file,))
+    logging.info('Writing realm data to %s', export_file)

     # Write analytics data
     export_analytics_tables(realm=realm, output_dir=output_dir)
@@ -1509,7 +1511,7 @@ def do_export_realm(realm: Realm, output_dir: Path, threads: int,
         launch_user_message_subprocesses(threads=threads, output_dir=output_dir,
                                          consent_message_id=consent_message_id)

-    logging.info("Finished exporting %s" % (realm.string_id,))
+    logging.info("Finished exporting %s", realm.string_id)
     create_soft_link(source=output_dir, in_progress=False)

     do_write_stats_file_for_realm_export(output_dir)
@@ -1528,7 +1530,7 @@ def export_attachment_table(realm: Realm, output_dir: Path, message_ids: Set[int
     response: TableData = {}
     fetch_attachment_data(response=response, realm_id=realm.id, message_ids=message_ids)
     output_file = os.path.join(output_dir, "attachment.json")
-    logging.info('Writing attachment table data to %s' % (output_file,))
+    logging.info('Writing attachment table data to %s', output_file)
     write_data_to_file(output_file=output_file, data=response)

 def create_soft_link(source: Path, in_progress: bool=True) -> None:
@@ -1551,11 +1553,11 @@ def create_soft_link(source: Path, in_progress: bool=True) -> None:
     overwrite_symlink(source, new_target)
     if is_done:
-        logging.info('See %s for output files' % (new_target,))
+        logging.info('See %s for output files', new_target)

 def launch_user_message_subprocesses(threads: int, output_dir: Path,
                                      consent_message_id: Optional[int]=None) -> None:
-    logging.info('Launching %d PARALLEL subprocesses to export UserMessage rows' % (threads,))
+    logging.info('Launching %d PARALLEL subprocesses to export UserMessage rows', threads)

     pids = {}
     for shard_id in range(threads):
@@ -1669,7 +1671,7 @@ def export_messages_single_user(user_profile: UserProfile, output_dir: Path,
             message_chunk.append(item)

         message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
-        logging.info("Fetched Messages for %s" % (message_filename,))
+        logging.info("Fetched Messages for %s", message_filename)

         output = {'zerver_message': message_chunk}
         floatify_datetime_fields(output, 'zerver_message')
diff --git a/zerver/lib/fix_unreads.py b/zerver/lib/fix_unreads.py
index c69b6c68c1..88358c6d9d 100644
--- a/zerver/lib/fix_unreads.py
+++ b/zerver/lib/fix_unreads.py
@@ -63,7 +63,7 @@ def get_timing(message: str, f: Callable[[], None]) -> None:
     logger.info(message)
     f()
     elapsed = time.time() - start
-    logger.info('elapsed time: %.03f\n' % (elapsed,))
+    logger.info('elapsed time: %.03f\n', elapsed)

 def fix_unsubscribed(cursor: CursorObj, user_profile: UserProfile) -> None:

@@ -126,7 +126,7 @@ def fix_unsubscribed(cursor: CursorObj, user_profile: UserProfile) -> None:
         rows = cursor.fetchall()
         for row in rows:
             user_message_ids.append(row[0])
-        logger.info('rows found: %d' % (len(user_message_ids),))
+        logger.info('rows found: %d', len(user_message_ids))

     get_timing(
         'finding unread messages for non-active streams',
@@ -216,7 +216,7 @@ def fix_pre_pointer(cursor: CursorObj, user_profile: UserProfile) -> None:
         for (um_id, recipient_id, topic) in rows:
             if not is_topic_muted(recipient_id, topic):
                 user_message_ids.append(um_id)
-        logger.info('rows found: %d' % (len(user_message_ids),))
+        logger.info('rows found: %d', len(user_message_ids))

     get_timing(
         'finding pre-pointer messages that are not muted',
@@ -235,7 +235,7 @@ def fix_pre_pointer(cursor: CursorObj, user_profile: UserProfile) -> None:
     )

 def fix(user_profile: UserProfile) -> None:
-    logger.info('\n---\nFixing %s:' % (user_profile.id,))
+    logger.info('\n---\nFixing %s:', user_profile.id)
     with connection.cursor() as cursor:
         fix_unsubscribed(cursor, user_profile)
         fix_pre_pointer(cursor, user_profile)
diff --git a/zerver/lib/i18n.py b/zerver/lib/i18n.py
index c267ef12af..fbdd488b18 100644
--- a/zerver/lib/i18n.py
+++ b/zerver/lib/i18n.py
@@ -72,7 +72,7 @@ def get_language_name(code: str) -> str:
         if code in (lang['code'], lang['locale']):
             return lang['name']
     # Log problem, but still return a name
-    logging.error("Unknown language code '%s'" % (code,))
+    logging.error("Unknown language code '%s'", code)
     return "Unknown"

 def get_available_language_codes() -> List[str]:
diff --git a/zerver/lib/import_realm.py b/zerver/lib/import_realm.py
index 55720e00a2..0e9764748c 100644
--- a/zerver/lib/import_realm.py
+++ b/zerver/lib/import_realm.py
@@ -317,7 +317,7 @@ def fix_message_rendered_content(realm: Realm,
             # * rendering markdown failing with the exception being
             #   caught in bugdown (which then returns None, causing the
             #   rendered_content assert above to fire).
-            logging.warning("Error in markdown rendering for message ID %s; continuing" % (message['id'],))
+            logging.warning("Error in markdown rendering for message ID %s; continuing", message['id'])

 def current_table_ids(data: TableData, table: TableName) -> List[int]:
     """
@@ -428,10 +428,8 @@ def re_map_foreign_keys_internal(data_table: List[Record],
         if old_id in lookup_table:
             new_id = lookup_table[old_id]
             if verbose:
-                logging.info('Remapping %s %s from %s to %s' % (table,
-                                                                field_name + '_id',
-                                                                old_id,
-                                                                new_id))
+                logging.info('Remapping %s %s from %s to %s',
+                             table, field_name + '_id', old_id, new_id)
         else:
             new_id = old_id
         if not id_field:
@@ -479,10 +477,8 @@ def re_map_foreign_keys_many_to_many_internal(table: TableName,
         if old_id in lookup_table:
             new_id = lookup_table[old_id]
             if verbose:
-                logging.info('Remapping %s %s from %s to %s' % (table,
-                                                                field_name + '_id',
-                                                                old_id,
-                                                                new_id))
+                logging.info('Remapping %s %s from %s to %s',
+                             table, field_name + '_id', old_id, new_id)
         else:
             new_id = old_id
         new_id_list.append(new_id)
@@ -566,16 +562,16 @@ def bulk_import_user_message_data(data: TableData, dump_file_id: int) -> None:
         process_batch=process_batch,
     )

-    logging.info("Successfully imported %s from %s[%s]." % (model, table, dump_file_id))
+    logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)

 def bulk_import_model(data: TableData, model: Any, dump_file_id: Optional[str]=None) -> None:
     table = get_db_table(model)
     # TODO, deprecate dump_file_id
     model.objects.bulk_create(model(**item) for item in data[table])
     if dump_file_id is None:
-        logging.info("Successfully imported %s from %s." % (model, table))
+        logging.info("Successfully imported %s from %s.", model, table)
     else:
-        logging.info("Successfully imported %s from %s[%s]." % (model, table, dump_file_id))
+        logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)

 # Client is a table shared by multiple realms, so in order to
 # correctly import multiple realms into the same server, we need to
@@ -627,7 +623,7 @@ def import_uploads(realm: Realm, import_dir: Path, processes: int, processing_av
     for record in records:
         count += 1
         if count % 1000 == 0:
-            logging.info("Processed %s/%s uploads" % (count, len(records)))
+            logging.info("Processed %s/%s uploads", count, len(records))

         if processing_avatars:
             # For avatars, we need to rehash the user ID with the
@@ -677,7 +673,7 @@ def import_uploads(realm: Realm, import_dir: Path, processes: int, processing_av
                 user_profile_id = int(record['user_profile_id'])
                 # Support email gateway bot and other cross-realm messages
                 if user_profile_id in ID_MAP["user_profile"]:
-                    logging.info("Uploaded by ID mapped user: %s!" % (user_profile_id,))
+                    logging.info("Uploaded by ID mapped user: %s!", user_profile_id)
                     user_profile_id = ID_MAP["user_profile"][user_profile_id]
                 user_profile = get_user_profile_by_id(user_profile_id)
                 key.set_metadata("user_profile_id", str(user_profile.id))
@@ -733,8 +729,10 @@ def import_uploads(realm: Realm, import_dir: Path, processes: int, processing_av
                 if record.get("importer_should_thumbnail"):
                     upload_backend.ensure_basic_avatar_image(user_profile=user_profile)
             except BadImageError:
-                logging.warning("Could not thumbnail avatar image for user %s; ignoring" % (
-                    user_profile.id,))
+                logging.warning(
+                    "Could not thumbnail avatar image for user %s; ignoring",
+                    user_profile.id,
+                )
                 # Delete the record of the avatar to avoid 404s.
                 do_change_avatar_fields(user_profile, UserProfile.AVATAR_FROM_GRAVATAR)
         return 0
@@ -767,7 +765,7 @@ def import_uploads(realm: Realm, import_dir: Path, processes: int, processing_av
 # faithful, we have to use a set of fixers (e.g. on DateTime objects
 # and Foreign Keys) to do the import correctly.
 def do_import_realm(import_dir: Path, subdomain: str, processes: int=1) -> Realm:
-    logging.info("Importing realm dump %s" % (import_dir,))
+    logging.info("Importing realm dump %s", import_dir)
     if not os.path.exists(import_dir):
         raise Exception("Missing import directory!")

@@ -778,7 +776,7 @@ def do_import_realm(import_dir: Path, subdomain: str, processes: int=1) -> Realm
     if not server_initialized():
         create_internal_realm()

-    logging.info("Importing realm data from %s" % (realm_data_filename,))
+    logging.info("Importing realm data from %s", realm_data_filename)
     with open(realm_data_filename) as f:
         data = ujson.load(f)
     remove_denormalized_recipient_column_from_data(data)
@@ -833,7 +831,7 @@ def do_import_realm(import_dir: Path, subdomain: str, processes: int=1) -> Realm
     # Remap the user IDs for notification_bot and friends to their
     # appropriate IDs on this server
     for item in data['zerver_userprofile_crossrealm']:
-        logging.info("Adding to ID map: %s %s" % (item['id'], get_system_bot(item['email']).id))
+        logging.info("Adding to ID map: %s %s", item['id'], get_system_bot(item['email']).id)
         new_user_id = get_system_bot(item['email']).id
         update_id_map(table='user_profile', old_id=item['id'], new_id=new_user_id)
         new_recipient_id = Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user_id).id
@@ -1070,7 +1068,7 @@ def do_import_realm(import_dir: Path, subdomain: str, processes: int=1) -> Realm
     if not os.path.exists(fn):
         raise Exception("Missing attachment.json file!")

-    logging.info("Importing attachment data from %s" % (fn,))
+    logging.info("Importing attachment data from %s", fn)
     with open(fn) as f:
         data = ujson.load(f)

@@ -1194,7 +1192,7 @@ def import_message_data(realm: Realm,
         with open(message_filename) as f:
             data = ujson.load(f)

-        logging.info("Importing message dump %s" % (message_filename,))
+        logging.info("Importing message dump %s", message_filename)
         re_map_foreign_keys(data, 'zerver_message', 'sender', related_table="user_profile")
         re_map_foreign_keys(data, 'zerver_message', 'recipient', related_table="recipient")
         re_map_foreign_keys(data, 'zerver_message', 'sending_client', related_table='client')
@@ -1299,14 +1297,14 @@ def import_attachments(data: TableData) -> None:
         tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
         cursor.executemany(sql_template, tups)

-    logging.info('Successfully imported M2M table %s' % (m2m_table_name,))
+    logging.info('Successfully imported M2M table %s', m2m_table_name)

 def import_analytics_data(realm: Realm, import_dir: Path) -> None:
     analytics_filename = os.path.join(import_dir, "analytics.json")
     if not os.path.exists(analytics_filename):
         return

-    logging.info("Importing analytics data from %s" % (analytics_filename,))
+    logging.info("Importing analytics data from %s", analytics_filename)
     with open(analytics_filename) as f:
         data = ujson.load(f)
diff --git a/zerver/lib/outgoing_webhook.py b/zerver/lib/outgoing_webhook.py
index 1b31ebdf71..4875c1b162 100644
--- a/zerver/lib/outgoing_webhook.py
+++ b/zerver/lib/outgoing_webhook.py
@@ -244,8 +244,10 @@ def request_retry(event: Dict[str, Any],
         bot_user = get_user_profile_by_id(event['user_profile_id'])
         fail_with_message(event, "Bot is unavailable")
         notify_bot_owner(event, request_data, failure_message=failure_message)
-        logging.warning("Maximum retries exceeded for trigger:%s event:%s" % (
-            bot_user.email, event['command']))
+        logging.warning(
+            "Maximum retries exceeded for trigger:%s event:%s",
+            bot_user.email, event['command'],
+        )

     retry_event('outgoing_webhooks', event, failure_processor)

@@ -288,23 +290,25 @@ def do_rest_call(base_url: str,
         else:
             logging.warning("Message %(message_url)s triggered an outgoing webhook, returning status "
                             "code %(status_code)s.\n Content of response (in quotes): \""
-                            "%(response)s\""
-                            % {'message_url': get_message_url(event),
-                               'status_code': response.status_code,
-                               'response': response.content})
+                            "%(response)s\"",
+                            {'message_url': get_message_url(event),
+                             'status_code': response.status_code,
+                             'response': response.content})
             failure_message = "Third party responded with %d" % (response.status_code,)
             fail_with_message(event, failure_message)
             notify_bot_owner(event, request_data, response.status_code, response.content)
     except requests.exceptions.Timeout:
-        logging.info("Trigger event %s on %s timed out. Retrying" % (
-            event["command"], event['service_name']))
+        logging.info(
+            "Trigger event %s on %s timed out. Retrying",
+            event["command"], event['service_name'],
+        )
         failure_message = "A timeout occurred."
         request_retry(event, request_data, failure_message=failure_message)

     except requests.exceptions.ConnectionError:
-        logging.info("Trigger event %s on %s resulted in a connection error. Retrying"
-                     % (event["command"], event['service_name']))
+        logging.info("Trigger event %s on %s resulted in a connection error. Retrying",
+                     event["command"], event['service_name'])
         failure_message = "A connection error occurred. Is my bot server down?"
         request_retry(event, request_data, failure_message=failure_message)
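do_rest_call above uses the other calling convention the logging module supports: when the sole argument after the format string is a mapping, it is used directly for %(name)s-style substitution, again deferred until the record is emitted. A minimal sketch with illustrative values:

    import logging

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger("demo")

    # A single dict argument supplies the named %(...)s placeholders.
    logger.warning("Message %(message_url)s returned status %(status_code)s.",
                   {'message_url': 'https://example.com/#narrow/id/1',
                    'status_code': 502})
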
diff --git a/zerver/lib/push_notifications.py b/zerver/lib/push_notifications.py
index b246d4bb54..92cb4e116b 100644
--- a/zerver/lib/push_notifications.py
+++ b/zerver/lib/push_notifications.py
@@ -164,7 +164,7 @@ def send_apple_push_notification(user_id: int, devices: List[DeviceToken],
             logger.info("APNs: Success sending for user %d to device %s",
                         user_id, device.token)
         elif result in ["Unregistered", "BadDeviceToken", "DeviceTokenNotForTopic"]:
-            logger.info("APNs: Removing invalid/expired token %s (%s)" % (device.token, result))
+            logger.info("APNs: Removing invalid/expired token %s (%s)", device.token, result)
             # We remove all entries for this token (There
             # could be multiple for different Zulip servers).
             DeviceTokenClass.objects.filter(token=device.token, kind=DeviceTokenClass.APNS).delete()
@@ -280,7 +280,7 @@ def send_android_push_notification(devices: List[DeviceToken], data: Dict[str, A

     if res and 'success' in res:
         for reg_id, msg_id in res['success'].items():
-            logger.info("GCM: Sent %s as %s" % (reg_id, msg_id))
+            logger.info("GCM: Sent %s as %s", reg_id, msg_id)

     if remote:
         DeviceTokenClass = RemotePushDeviceToken
@@ -294,7 +294,7 @@ def send_android_push_notification(devices: List[DeviceToken], data: Dict[str, A
         for reg_id, new_reg_id in res['canonical'].items():
             if reg_id == new_reg_id:
                 # I'm not sure if this should happen. In any case, not really actionable.
-                logger.warning("GCM: Got canonical ref but it already matches our ID %s!" % (reg_id,))
+                logger.warning("GCM: Got canonical ref but it already matches our ID %s!", reg_id)
             elif not DeviceTokenClass.objects.filter(token=new_reg_id,
                                                      kind=DeviceTokenClass.GCM).count():
                 # This case shouldn't happen; any time we get a canonical ref it should have been
                 # registered in our system.
                 #
                 # That said, recovery is easy: just update the current PDT object to use the new ID.
                 logger.warning(
-                    "GCM: Got canonical ref %s replacing %s but new ID not registered! Updating." %
-                    (new_reg_id, reg_id))
+                    "GCM: Got canonical ref %s replacing %s but new ID not registered! Updating.",
+                    new_reg_id, reg_id)
                 DeviceTokenClass.objects.filter(
                     token=reg_id, kind=DeviceTokenClass.GCM).update(token=new_reg_id)
             else:
                 # Since we know the new ID is registered in our system we can just drop the old one.
-                logger.info("GCM: Got canonical ref %s, dropping %s" % (new_reg_id, reg_id))
+                logger.info("GCM: Got canonical ref %s, dropping %s", new_reg_id, reg_id)

                 DeviceTokenClass.objects.filter(token=reg_id, kind=DeviceTokenClass.GCM).delete()

@@ -316,13 +316,13 @@ def send_android_push_notification(devices: List[DeviceToken], data: Dict[str, A
         for error, reg_ids in res['errors'].items():
             if error in ['NotRegistered', 'InvalidRegistration']:
                 for reg_id in reg_ids:
-                    logger.info("GCM: Removing %s" % (reg_id,))
+                    logger.info("GCM: Removing %s", reg_id)
                     # We remove all entries for this token (There
                     # could be multiple for different Zulip servers).
                     DeviceTokenClass.objects.filter(token=reg_id, kind=DeviceTokenClass.GCM).delete()
             else:
                 for reg_id in reg_ids:
-                    logger.warning("GCM: Delivery to %s failed: %s" % (reg_id, error))
+                    logger.warning("GCM: Delivery to %s failed: %s", reg_id, error)

     # python-gcm handles retrying of the unsent messages.
     # Ref: https://github.com/geeknam/python-gcm/blob/master/gcm/gcm.py#L497
@@ -707,8 +707,10 @@ def handle_push_notification(user_profile_id: int, missed_message: Dict[str, Any
             # If the cause is a race with the message being deleted,
             # that's normal and we have no need to log an error.
             return
-        logging.error("Unexpected message access failure handling push notifications: %s %s" % (
-            user_profile.id, missed_message['message_id']))
+        logging.error(
+            "Unexpected message access failure handling push notifications: %s %s",
+            user_profile.id, missed_message['message_id'],
+        )
         return

     if user_message is not None:
@@ -730,15 +732,17 @@ def handle_push_notification(user_profile_id: int, missed_message: Dict[str, Any
         # queue for messages they haven't received if they're
        # long-term idle; anything else is likely a bug.
         if not user_profile.long_term_idle:
-            logger.error("Could not find UserMessage with message_id %s and user_id %s" % (
-                missed_message['message_id'], user_profile_id))
+            logger.error(
+                "Could not find UserMessage with message_id %s and user_id %s",
+                missed_message['message_id'], user_profile_id,
+            )
             return

     message.trigger = missed_message['trigger']

     apns_payload = get_message_payload_apns(user_profile, message)
     gcm_payload, gcm_options = get_message_payload_gcm(user_profile, message)
-    logger.info("Sending push notifications to mobile clients for user %s" % (user_profile_id,))
+    logger.info("Sending push notifications to mobile clients for user %s", user_profile_id)

     if uses_notification_bouncer():
         send_notifications_to_bouncer(user_profile_id,
diff --git a/zerver/lib/queue.py b/zerver/lib/queue.py
index 15b92fa9d8..bb361114a1 100644
--- a/zerver/lib/queue.py
+++ b/zerver/lib/queue.py
@@ -195,7 +195,7 @@ class ExceptionFreeTornadoConnection(pika.adapters.tornado_connection.TornadoCon
                 pika.exceptions.ProbableAccessDeniedError,
                 pika.exceptions.IncompatibleProtocolError) as e:
             logging.warning("Caught exception '%r' in ExceptionFreeTornadoConnection when \
-calling _adapter_disconnect, ignoring" % (e,))
+calling _adapter_disconnect, ignoring", e)


 class TornadoQueueClient(SimpleQueueClient):
diff --git a/zerver/lib/rate_limiter.py b/zerver/lib/rate_limiter.py
index 4eaad9d7bd..e631be9cab 100644
--- a/zerver/lib/rate_limiter.py
+++ b/zerver/lib/rate_limiter.py
@@ -455,7 +455,7 @@ class RedisRateLimiterBackend(RateLimiterBackend):
         try:
             cls.incr_ratelimit(entity_key, max_api_calls, max_api_window)
         except RateLimiterLockingException:
-            logger.warning("Deadlock trying to incr_ratelimit for %s" % (entity_key,))
+            logger.warning("Deadlock trying to incr_ratelimit for %s", entity_key)
             # rate-limit users who are hitting the API so hard we can't update our stats.
             ratelimited = True
diff --git a/zerver/lib/retention.py b/zerver/lib/retention.py
index d2f46cfc58..771e44f1b9 100644
--- a/zerver/lib/retention.py
+++ b/zerver/lib/retention.py
@@ -89,7 +89,7 @@ def run_archiving_in_chunks(query: str, type: int, realm: Optional[Realm]=None,
     while True:
         with transaction.atomic():
             archive_transaction = ArchiveTransaction.objects.create(type=type, realm=realm)
-            logger.info("Archiving in {}".format(archive_transaction))
+            logger.info("Archiving in %s", archive_transaction)
             new_chunk = move_rows(Message, query, chunk_size=chunk_size, returning_id=True,
                                   archive_transaction_id=archive_transaction.id, **kwargs)
             if new_chunk:
@@ -102,7 +102,7 @@ def run_archiving_in_chunks(query: str, type: int, realm: Optional[Realm]=None,
         # This line needs to be outside of the atomic block, to capture the actual moment
         # archiving of the chunk is finished (since Django does some significant additional work
         # when leaving the block).
-        logger.info("Finished. Archived {} messages in this transaction.".format(len(new_chunk)))
+        logger.info("Finished. Archived %s messages in this transaction.", len(new_chunk))

         # We run the loop, until the query returns fewer results than chunk_size,
         # which means we are done:
@@ -269,7 +269,7 @@ def archive_messages_by_recipient(recipient: Recipient, message_retention_days:
 def archive_personal_and_huddle_messages(realm: Realm, chunk_size: int=MESSAGE_BATCH_SIZE) -> None:
     logger.info("Archiving personal and huddle messages for realm " + realm.string_id)
     message_count = move_expired_personal_and_huddle_messages_to_archive(realm, chunk_size)
-    logger.info("Done. Archived {} messages".format(message_count))
+    logger.info("Done. Archived %s messages", message_count)

 def archive_stream_messages(realm: Realm, chunk_size: int=MESSAGE_BATCH_SIZE) -> None:
     logger.info("Archiving stream messages for realm " + realm.string_id)
@@ -296,10 +296,10 @@ def archive_stream_messages(realm: Realm, chunk_size: int=MESSAGE_BATCH_SIZE) ->
             recipient, retention_policy_dict[recipient.type_id], realm, chunk_size
         )

-    logger.info("Done. Archived {} messages.".format(message_count))
+    logger.info("Done. Archived %s messages.", message_count)

 def archive_messages(chunk_size: int=MESSAGE_BATCH_SIZE) -> None:
-    logger.info("Starting the archiving process with chunk_size {}".format(chunk_size))
+    logger.info("Starting the archiving process with chunk_size %s", chunk_size)

     # We exclude SYSTEM_BOT_REALM here because the logic for archiving
     # private messages and huddles isn't designed to correctly handle
@@ -393,7 +393,7 @@ def restore_attachment_messages_from_archive(archive_transaction_id: int) -> Non
     cursor.execute(query.format(archive_transaction_id=archive_transaction_id))

 def restore_data_from_archive(archive_transaction: ArchiveTransaction) -> int:
-    logger.info("Restoring {}".format(archive_transaction))
+    logger.info("Restoring %s", archive_transaction)
     # transaction.atomic needs to be used here, rather than being a wrapper on the whole function,
     # so that when we log "Finished", the process has indeed finished - and that happens only after
     # leaving the atomic block - Django does work committing the changes to the database when
@@ -407,7 +407,7 @@ def restore_data_from_archive(archive_transaction: ArchiveTransaction) -> int:
     archive_transaction.restored = True
     archive_transaction.save()

-    logger.info("Finished. Restored {} messages".format(len(msg_ids)))
+    logger.info("Finished. Restored %s messages", len(msg_ids))
     return len(msg_ids)

 def restore_data_from_archive_by_transactions(archive_transactions: List[ArchiveTransaction]) -> int:
@@ -422,10 +422,10 @@ def restore_data_from_archive_by_transactions(archive_transactions: List[Archive
 def restore_data_from_archive_by_realm(realm: Realm) -> None:
     transactions = ArchiveTransaction.objects.exclude(restored=True).filter(
         realm=realm, type=ArchiveTransaction.RETENTION_POLICY_BASED)
-    logger.info("Restoring {} transactions from realm {}".format(len(transactions), realm.string_id))
+    logger.info("Restoring %s transactions from realm %s", len(transactions), realm.string_id)
     message_count = restore_data_from_archive_by_transactions(transactions)

-    logger.info("Finished. Restored {} messages from realm {}".format(message_count, realm.string_id))
+    logger.info("Finished. Restored %s messages from realm %s", message_count, realm.string_id)

 def restore_all_data_from_archive(restore_manual_transactions: bool=True) -> None:
     for realm in Realm.objects.all():
@@ -444,4 +444,4 @@ def clean_archived_data() -> None:
     count = transactions.count()
     transactions.delete()

-    logger.info("Deleted {} old ArchiveTransactions.".format(count))
+    logger.info("Deleted %s old ArchiveTransactions.", count)
diff --git a/zerver/lib/send_email.py b/zerver/lib/send_email.py
index 43f6653e81..da7bbcc753 100644
--- a/zerver/lib/send_email.py
+++ b/zerver/lib/send_email.py
@@ -100,7 +100,7 @@ def build_email(template_prefix: str, to_user_ids: Optional[List[int]]=None,
             (html_message, message, email_subject) = render_templates()
     else:
         (html_message, message, email_subject) = render_templates()
-        logger.warning("Missing language for email template '{}'".format(template_prefix))
+        logger.warning("Missing language for email template '%s'", template_prefix)

     if from_name is None:
         from_name = "Zulip"
@@ -153,10 +153,10 @@ def send_email(template_prefix: str, to_user_ids: Optional[List[int]]=None,
                        from_name=from_name, from_address=from_address,
                        reply_to_email=reply_to_email, language=language, context=context)
     template = template_prefix.split("/")[-1]
-    logger.info("Sending %s email to %s" % (template, mail.to))
+    logger.info("Sending %s email to %s", template, mail.to)

     if mail.send() == 0:
-        logger.error("Error sending %s email to %s" % (template, mail.to))
+        logger.error("Error sending %s email to %s", template, mail.to)
         raise EmailNotDeliveredException

 def send_email_from_dict(email_dict: Mapping[str, Any]) -> None:
diff --git a/zerver/lib/sessions.py b/zerver/lib/sessions.py
index e7043b2668..d61a4cbecd 100644
--- a/zerver/lib/sessions.py
+++ b/zerver/lib/sessions.py
@@ -53,7 +53,7 @@ def delete_all_deactivated_user_sessions() -> None:
             continue
         user_profile = get_user_profile_by_id(user_profile_id)
         if not user_profile.is_active or user_profile.realm.deactivated:
-            logging.info("Deactivating session for deactivated user %s" % (user_profile.id,))
+            logging.info("Deactivating session for deactivated user %s", user_profile.id)
             delete_session(session)

 def set_expirable_session_var(session: Session, var_name: str, var_value: Any,
                               expiry_seconds: int) -> None:
@@ -68,7 +68,7 @@ def get_expirable_session_var(session: Session, var_name: str, default_value: An
     try:
         value, expire_at = (session[var_name]['value'], session[var_name]['expire_at'])
     except (KeyError, TypeError) as e:
-        logging.warning("get_expirable_session_var: Variable {}: {}".format(var_name, e))
+        logging.warning("get_expirable_session_var: Variable %s: %s", var_name, e)
         return default_value

     if timestamp_to_datetime(expire_at) < timezone_now():
diff --git a/zerver/lib/soft_deactivation.py b/zerver/lib/soft_deactivation.py
index 727ea91a82..2da8e91bd5 100644
--- a/zerver/lib/soft_deactivation.py
+++ b/zerver/lib/soft_deactivation.py
@@ -210,7 +210,7 @@ def do_soft_deactivate_user(user_profile: UserProfile) -> None:
     user_profile.save(update_fields=[
         'long_term_idle',
         'last_active_message_id'])
-    logger.info('Soft Deactivated user %s' % (user_profile.id,))
+    logger.info('Soft Deactivated user %s', user_profile.id)

 def do_soft_deactivate_users(users: List[UserProfile]) -> List[UserProfile]:
     BATCH_SIZE = 100
@@ -234,8 +234,8 @@ def do_soft_deactivate_users(users: List[UserProfile]) -> List[UserProfile]:
             users_soft_deactivated.append(user)
         RealmAuditLog.objects.bulk_create(realm_logs)

-        logging.info("Soft-deactivated batch of %s users; %s remain to process" %
-                     (len(user_batch), len(users)))
+        logging.info("Soft-deactivated batch of %s users; %s remain to process",
+                     len(user_batch), len(users))

     return users_soft_deactivated

@@ -267,7 +267,7 @@ def reactivate_user_if_soft_deactivated(user_profile: UserProfile) -> Union[User
             event_type=RealmAuditLog.USER_SOFT_ACTIVATED,
             event_time=timezone_now()
         )
-        logger.info('Soft Reactivated user %s' % (user_profile.id,))
+        logger.info('Soft Reactivated user %s', user_profile.id)
         return user_profile
     return None

@@ -301,7 +301,7 @@ def do_catch_up_soft_deactivated_users(users: List[UserProfile]) -> List[UserPro
             if user_profile.long_term_idle:
                 add_missing_messages(user_profile)
                 users_caught_up.append(user_profile)
-    logger.info("Caught up %d soft-deactivated users" % (len(users_caught_up),))
+    logger.info("Caught up %d soft-deactivated users", len(users_caught_up))
     return users_caught_up

 def get_soft_deactivated_users_for_catch_up(filter_kwargs: Any) -> List[UserProfile]:
diff --git a/zerver/lib/transfer.py b/zerver/lib/transfer.py
index 3c67dc34f6..aeab61fc95 100644
--- a/zerver/lib/transfer.py
+++ b/zerver/lib/transfer.py
@@ -25,7 +25,7 @@ def transfer_avatars_to_s3(processes: int) -> None:
         try:
             with open(file_path, 'rb') as f:
                 s3backend.upload_avatar_image(f, user, user)
-                logging.info("Uploaded avatar for {} in realm {}".format(user.id, user.realm.name))
+                logging.info("Uploaded avatar for %s in realm %s", user.id, user.realm.name)
         except FileNotFoundError:
             pass
         return 0
@@ -48,7 +48,7 @@ def transfer_message_files_to_s3(processes: int) -> None:
                 bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
                 guessed_type = guess_type(attachment.file_name)[0]
                 upload_image_to_s3(bucket_name, attachment.path_id, guessed_type, attachment.owner, f.read())
-                logging.info("Uploaded message file in path {}".format(file_path))
+                logging.info("Uploaded message file in path %s", file_path)
         except FileNotFoundError:  # nocoverage
             pass
         return 0
@@ -75,7 +75,7 @@ def transfer_emoji_to_s3(processes: int) -> None:
         try:
             with open(emoji_path, 'rb') as f:
                 s3backend.upload_emoji_image(f, realm_emoji.file_name, realm_emoji.author)
-                logging.info("Uploaded emoji file in path {}".format(emoji_path))
+                logging.info("Uploaded emoji file in path %s", emoji_path)
         except FileNotFoundError:  # nocoverage
             pass
         return 0
diff --git a/zerver/lib/upload.py b/zerver/lib/upload.py
index 8c6982ad70..bd239f85c1 100644
--- a/zerver/lib/upload.py
+++ b/zerver/lib/upload.py
@@ -357,7 +357,7 @@ class S3UploadBackend(ZulipUploadBackend):
             return True

         file_name = path_id.split("/")[-1]
-        logging.warning("%s does not exist. Its entry in the database will be removed." % (file_name,))
+        logging.warning("%s does not exist. Its entry in the database will be removed.", file_name)
         return False

     def upload_message_file(self, uploaded_file_name: str, uploaded_file_size: int,
@@ -655,7 +655,7 @@ def delete_local_file(type: str, path: str) -> bool:
         os.remove(file_path)
         return True
     file_name = path.split("/")[-1]
-    logging.warning("%s does not exist. Its entry in the database will be removed." % (file_name,))
+    logging.warning("%s does not exist. 
Its entry in the database will be removed.", file_name) return False def get_local_file_path(path_id: str) -> Optional[str]: diff --git a/zerver/logging_handlers.py b/zerver/logging_handlers.py index 938c9ecbc7..2bfd4380fa 100644 --- a/zerver/logging_handlers.py +++ b/zerver/logging_handlers.py @@ -138,9 +138,11 @@ class AdminNotifyHandler(logging.Handler): if settings.DEBUG_ERROR_REPORTING: # nocoverage logging.warning("Reporting an error to admins...") - logging.warning("Reporting an error to admins: {} {} {} {} {}" .format( + logging.warning( + "Reporting an error to admins: %s %s %s %s %s", record.levelname, report['logger_name'], report['log_module'], - report['message'], report['stack_trace'])) + report['message'], report['stack_trace'], + ) try: if settings.STAGING_ERROR_NOTIFICATIONS: diff --git a/zerver/management/commands/check_redis.py b/zerver/management/commands/check_redis.py index 8edebf08e3..d523de3f86 100644 --- a/zerver/management/commands/check_redis.py +++ b/zerver/management/commands/check_redis.py @@ -32,12 +32,12 @@ class Command(BaseCommand): age = int(client.ttl(key)) if age < 0: - logging.error("Found key with age of %s, will never expire: %s" % (age, key,)) + logging.error("Found key with age of %s, will never expire: %s", age, key) count = count_func() if count > max_calls: logging.error("Redis health check found key with more elements \ -than max_api_calls! (trying to trim) %s %s" % (key, count)) +than max_api_calls! (trying to trim) %s %s", key, count) if trim_func is not None: client.expire(key, entity.max_api_window()) trim_func(key, max_calls) diff --git a/zerver/management/commands/deliver_email.py b/zerver/management/commands/deliver_email.py index a352090ebd..023d3dbe45 100644 --- a/zerver/management/commands/deliver_email.py +++ b/zerver/management/commands/deliver_email.py @@ -46,7 +46,7 @@ Usage: ./manage.py deliver_email try: deliver_email(job) except EmailNotDeliveredException: - logger.warning("%r not delivered" % (job,)) + logger.warning("%r not delivered", job) time.sleep(10) else: # Less load on the db during times of activity, diff --git a/zerver/management/commands/export_usermessage_batch.py b/zerver/management/commands/export_usermessage_batch.py index 72c70d31c8..4944e594e4 100644 --- a/zerver/management/commands/export_usermessage_batch.py +++ b/zerver/management/commands/export_usermessage_batch.py @@ -32,7 +32,7 @@ class Command(BaseCommand): help='ID of the message advertising users to react with thumbs up') def handle(self, *args: Any, **options: Any) -> None: - logging.info("Starting UserMessage batch thread %s" % (options['thread'],)) + logging.info("Starting UserMessage batch thread %s", options['thread']) files = set(glob.glob(os.path.join(options['path'], 'messages-*.json.partial'))) for partial_path in files: locked_path = partial_path.replace(".json.partial", ".json.locked") @@ -42,7 +42,7 @@ class Command(BaseCommand): except Exception: # Already claimed by another process continue - logging.info("Thread %s processing %s" % (options['thread'], output_path)) + logging.info("Thread %s processing %s", options['thread'], output_path) try: export_usermessages_batch(locked_path, output_path, options["consent_message_id"]) except Exception: diff --git a/zerver/management/commands/process_queue.py b/zerver/management/commands/process_queue.py index 8343b92e5a..785f62078d 100644 --- a/zerver/management/commands/process_queue.py +++ b/zerver/management/commands/process_queue.py @@ -57,7 +57,7 @@ class Command(BaseCommand): td = 
Threaded_worker(queue_name) td.start() assert len(queues) == cnt - logger.info('%d queue worker threads were launched' % (cnt,)) + logger.info('%d queue worker threads were launched', cnt) if options['all']: signal.signal(signal.SIGUSR1, exit_with_three) @@ -70,12 +70,12 @@ class Command(BaseCommand): queue_name = options['queue_name'] worker_num = options['worker_num'] - logger.info("Worker %d connecting to queue %s" % (worker_num, queue_name)) + logger.info("Worker %d connecting to queue %s", worker_num, queue_name) worker = get_worker(queue_name) worker.setup() def signal_handler(signal: int, frame: FrameType) -> None: - logger.info("Worker %d disconnecting from queue %s" % (worker_num, queue_name)) + logger.info("Worker %d disconnecting from queue %s", worker_num, queue_name) worker.stop() sys.exit(0) signal.signal(signal.SIGTERM, signal_handler) diff --git a/zerver/management/commands/set_message_flags.py b/zerver/management/commands/set_message_flags.py index 596d0a60a5..9cde3eadcd 100644 --- a/zerver/management/commands/set_message_flags.py +++ b/zerver/management/commands/set_message_flags.py @@ -74,7 +74,7 @@ class Command(ZulipBaseCommand): msgs.update(flags=models.F('flags').bitand(~flag)) if not options["for_real"]: - logging.info("Updating %s by %s %s" % (mids, op, flag)) + logging.info("Updating %s by %s %s", mids, op, flag) logging.info("Dry run completed. Run with --for-real to change message flags.") raise CommandError diff --git a/zerver/management/commands/sync_ldap_user_data.py b/zerver/management/commands/sync_ldap_user_data.py index 497ecdcd2b..a539b8422e 100644 --- a/zerver/management/commands/sync_ldap_user_data.py +++ b/zerver/management/commands/sync_ldap_user_data.py @@ -26,7 +26,7 @@ def sync_ldap_user_data(user_profiles: List[UserProfile], deactivation_protectio try: sync_user_from_ldap(u, logger) except ZulipLDAPException as e: - logger.error("Error attempting to update user %s:" % (u.delivery_email,)) + logger.error("Error attempting to update user %s:", u.delivery_email) logger.error(e) if deactivation_protection: diff --git a/zerver/middleware.py b/zerver/middleware.py index 2ba1aafb64..b91f7b38c8 100644 --- a/zerver/middleware.py +++ b/zerver/middleware.py @@ -230,7 +230,7 @@ def write_log_line(log_data: MutableMapping[str, Any], path: str, method: str, r error_data = repr(b''.join(error_content_list)) if len(error_data) > 200: error_data = "[content more than 200 characters]" - logger.info('status=%3d, data=%s, uid=%s' % (status_code, error_data, requestor_for_logs)) + logger.info('status=%3d, data=%s, uid=%s', status_code, error_data, requestor_for_logs) class LogRequests(MiddlewareMixin): # We primarily are doing logging using the process_view hook, but diff --git a/zerver/tests/test_auth_backends.py b/zerver/tests/test_auth_backends.py index 5f0af05deb..31abe7c401 100644 --- a/zerver/tests/test_auth_backends.py +++ b/zerver/tests/test_auth_backends.py @@ -1554,13 +1554,13 @@ class SAMLAuthBackendTest(SocialAuthBase): result = self.client_get('/login/saml/') self.assertEqual(result.status_code, 302) self.assertEqual('/login/', result.url) - m.assert_called_with("/login/saml/ : Bad idp param: KeyError: 'idp'.") + m.assert_called_with("/login/saml/ : Bad idp param: KeyError: %s.", "'idp'") with mock.patch('zproject.backends.logging.info') as m: result = self.client_get('/login/saml/?idp=bad_idp') self.assertEqual(result.status_code, 302) self.assertEqual('/login/', result.url) - m.assert_called_with("/login/saml/ : Bad idp param: KeyError: 'bad_idp'.") + 
m.assert_called_with("/login/saml/ : Bad idp param: KeyError: %s.", "'bad_idp'") def test_social_auth_invalid_email(self) -> None: """ @@ -1635,7 +1635,8 @@ class SAMLAuthBackendTest(SocialAuthBase): self.assertEqual(result.status_code, 302) self.assertEqual('/login/', result.url) m.assert_called_with( - "User authenticated with IdP test_idp but this provider is not enabled for this realm zephyr." + "User authenticated with IdP %s but this provider is not enabled for this realm %s.", + "test_idp", "zephyr", ) def test_social_auth_saml_login_bad_idp_arg(self) -> None: @@ -1739,8 +1740,10 @@ class GitHubAuthBackendTest(SocialAuthBase): email_data=email_data) self.assertEqual(result.status_code, 302) self.assertEqual(result.url, "/login/") - mock_warning.assert_called_once_with("Social auth (GitHub) failed " - "because user has no verified emails") + mock_warning.assert_called_once_with( + "Social auth (%s) failed because user has no verified emails", + "GitHub", + ) @override_settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp') def test_social_auth_github_team_not_member_failed(self) -> None: @@ -2016,8 +2019,11 @@ class GitHubAuthBackendTest(SocialAuthBase): email_data=email_data) self.assertEqual(result.status_code, 302) self.assertEqual(result.url, "/login/") - mock_warning.assert_called_once_with("Social auth (GitHub) failed because user has no verified" - " emails associated with the account") + mock_warning.assert_called_once_with( + "Social auth (%s) failed because user has no verified" + " emails associated with the account", + "GitHub", + ) def test_github_oauth2_email_not_associated(self) -> None: account_data_dict = dict(email='not-associated@zulip.com', name=self.name) @@ -2037,8 +2043,11 @@ class GitHubAuthBackendTest(SocialAuthBase): email_data=email_data) self.assertEqual(result.status_code, 302) self.assertEqual(result.url, "/login/") - mock_warning.assert_called_once_with("Social auth (GitHub) failed because user has no verified" - " emails associated with the account") + mock_warning.assert_called_once_with( + "Social auth (%s) failed because user has no verified" + " emails associated with the account", + "GitHub", + ) class GitLabAuthBackendTest(SocialAuthBase): __unittest_skip__ = False @@ -2085,8 +2094,10 @@ class GoogleAuthBackendTest(SocialAuthBase): subdomain='zulip') self.assertEqual(result.status_code, 302) self.assertEqual(result.url, "/login/") - mock_warning.assert_called_once_with("Social auth (Google) failed " - "because user has no verified emails") + mock_warning.assert_called_once_with( + "Social auth (%s) failed because user has no verified emails", + "Google", + ) def test_social_auth_mobile_success_legacy_url(self) -> None: mobile_flow_otp = '1234abcd' * 8 @@ -2171,7 +2182,7 @@ class GoogleAuthBackendTest(SocialAuthBase): 'redirect_to': ''} # type: ExternalAuthDataDict with mock.patch("logging.warning") as mock_warn: result = self.get_log_into_subdomain(data, force_token='nonsense') - mock_warn.assert_called_once_with("log_into_subdomain: Malformed token given: nonsense") + mock_warn.assert_called_once_with("log_into_subdomain: Malformed token given: %s", "nonsense") self.assertEqual(result.status_code, 400) def test_log_into_subdomain_when_token_not_found(self) -> None: @@ -2183,7 +2194,7 @@ class GoogleAuthBackendTest(SocialAuthBase): with mock.patch("logging.warning") as mock_warn: token = generate_random_token(ExternalAuthResult.LOGIN_TOKEN_LENGTH) result = self.get_log_into_subdomain(data, force_token=token) - 
mock_warn.assert_called_once_with("log_into_subdomain: Invalid token given: %s" % (token,)) + mock_warn.assert_called_once_with("log_into_subdomain: Invalid token given: %s", token) self.assertEqual(result.status_code, 400) self.assert_in_response("Invalid or expired login session.", result) @@ -3188,7 +3199,7 @@ class DjangoToLDAPUsernameTests(ZulipTestCase): with mock.patch("zproject.backends.logging.warning") as mock_warn: with self.assertRaises(ZulipLDAPExceptionNoMatchingLDAPUser): self.backend.django_to_ldap_username("shared_email@zulip.com") - mock_warn.assert_called_with("Multiple users with email shared_email@zulip.com found in LDAP.") + mock_warn.assert_called_with("Multiple users with email %s found in LDAP.", "shared_email@zulip.com") # Test on a weird case of a user whose uid is an email and his actual "mail" # attribute is a different email address: @@ -3740,7 +3751,7 @@ class TestZulipLDAPUserPopulator(ZulipLDAPTestCase): mock_logger = mock.MagicMock() result = sync_user_from_ldap(othello, mock_logger) mock_logger.warning.assert_called_once_with( - "Did not find %s in LDAP." % (othello.delivery_email,)) + "Did not find %s in LDAP.", othello.delivery_email) self.assertFalse(result) do_deactivate_user(othello) @@ -3810,7 +3821,8 @@ class TestZulipLDAPUserPopulator(ZulipLDAPTestCase): with mock.patch('logging.warning') as mock_warning: self.perform_ldap_sync(self.example_user('hamlet')) mock_warning.assert_called_once_with( - 'Could not parse jpegPhoto field for user %s' % (hamlet.id,)) + 'Could not parse %s field for user %s', 'jpegPhoto', hamlet.id, + ) def test_deactivate_non_matching_users(self) -> None: with self.settings(LDAP_APPEND_DOMAIN='zulip.com', diff --git a/zerver/tests/test_cache.py b/zerver/tests/test_cache.py index c016588bb5..8950791b62 100644 --- a/zerver/tests/test_cache.py +++ b/zerver/tests/test_cache.py @@ -199,16 +199,18 @@ class SafeCacheFunctionsTest(ZulipTestCase): with patch('zerver.lib.cache.logger.warning') as mock_warn: safe_cache_set_many(items) mock_warn.assert_called_once() - warning_string = mock_warn.call_args[0][0] - self.assertIn("badkey1", warning_string) - self.assertIn("badkey2", warning_string) + self.assertEqual( + mock_warn.call_args[0][1], + ['SafeFunctionsTest:\nbadkey1', 'SafeFunctionsTest:\nbadkey2'], + ) with patch('zerver.lib.cache.logger.warning') as mock_warn: result = safe_cache_get_many(list(items.keys())) mock_warn.assert_called_once() - warning_string = mock_warn.call_args[0][0] - self.assertIn("badkey1", warning_string) - self.assertIn("badkey2", warning_string) + self.assertEqual( + mock_warn.call_args[0][1], + ['SafeFunctionsTest:\nbadkey1', 'SafeFunctionsTest:\nbadkey2'], + ) self.assertEqual(result, {}) @@ -220,16 +222,18 @@ class SafeCacheFunctionsTest(ZulipTestCase): with patch('zerver.lib.cache.logger.warning') as mock_warn: safe_cache_set_many(items) mock_warn.assert_called_once() - warning_string = mock_warn.call_args[0][0] - self.assertIn("badkey1", warning_string) - self.assertIn("badkey2", warning_string) + self.assertEqual( + mock_warn.call_args[0][1], + ['SafeFunctionsTest:\nbadkey1', 'SafeFunctionsTest:\nbadkey2'], + ) with patch('zerver.lib.cache.logger.warning') as mock_warn: result = safe_cache_get_many(list(items.keys())) mock_warn.assert_called_once() - warning_string = mock_warn.call_args[0][0] - self.assertIn("badkey1", warning_string) - self.assertIn("badkey2", warning_string) + self.assertEqual( + mock_warn.call_args[0][1], + ['SafeFunctionsTest:\nbadkey1', 'SafeFunctionsTest:\nbadkey2'], + ) 
self.assertEqual(result, good_items) diff --git a/zerver/tests/test_decorators.py b/zerver/tests/test_decorators.py index be09ad797f..ebb35d1db6 100644 --- a/zerver/tests/test_decorators.py +++ b/zerver/tests/test_decorators.py @@ -276,8 +276,9 @@ class DecoratorTestCase(TestCase): api_result = my_webhook(request) # type: ignore[call-arg] # mypy doesn't seem to apply the decorator mock_warning.assert_called_with( - "User {} ({}) attempted to access API on wrong " - "subdomain ({})".format(webhook_bot_email, 'zulip', '')) + "User %s (%s) attempted to access API on wrong subdomain (%s)", + webhook_bot_email, 'zulip', '', + ) with mock.patch('logging.warning') as mock_warning: with self.assertRaisesRegex(JsonableError, @@ -286,8 +287,9 @@ class DecoratorTestCase(TestCase): api_result = my_webhook(request) # type: ignore[call-arg] # mypy doesn't seem to apply the decorator mock_warning.assert_called_with( - "User {} ({}) attempted to access API on wrong " - "subdomain ({})".format(webhook_bot_email, 'zulip', 'acme')) + "User %s (%s) attempted to access API on wrong subdomain (%s)", + webhook_bot_email, 'zulip', 'acme', + ) request.host = "zulip.testserver" # Test when content_type is application/json and request.body @@ -1321,8 +1323,9 @@ class TestValidateApiKey(ZulipTestCase): self.default_bot.email, api_key) mock_warning.assert_called_with( - "User {} ({}) attempted to access API on wrong " - "subdomain ({})".format(self.default_bot.email, 'zulip', '')) + "User %s (%s) attempted to access API on wrong subdomain (%s)", + self.default_bot.email, 'zulip', '', + ) with mock.patch('logging.warning') as mock_warning: with self.assertRaisesRegex(JsonableError, @@ -1331,8 +1334,9 @@ class TestValidateApiKey(ZulipTestCase): self.default_bot.email, api_key) mock_warning.assert_called_with( - "User {} ({}) attempted to access API on wrong " - "subdomain ({})".format(self.default_bot.email, 'zulip', 'acme')) + "User %s (%s) attempted to access API on wrong subdomain (%s)", + self.default_bot.email, 'zulip', 'acme', + ) def _change_is_active_field(self, profile: UserProfile, value: bool) -> None: profile.is_active = value @@ -1465,8 +1469,9 @@ class TestAuthenticatedJsonPostViewDecorator(ZulipTestCase): "Account is not associated with this " "subdomain") mock_warning.assert_called_with( - "User {} ({}) attempted to access API on wrong " - "subdomain ({})".format(email, 'zulip', '')) + "User %s (%s) attempted to access API on wrong subdomain (%s)", + email, 'zulip', '', + ) with mock.patch('logging.warning') as mock_warning, \ mock.patch('zerver.decorator.get_subdomain', return_value='acme'): @@ -1474,8 +1479,9 @@ class TestAuthenticatedJsonPostViewDecorator(ZulipTestCase): "Account is not associated with this " "subdomain") mock_warning.assert_called_with( - "User {} ({}) attempted to access API on wrong " - "subdomain ({})".format(email, 'zulip', 'acme')) + "User %s (%s) attempted to access API on wrong subdomain (%s)", + email, 'zulip', 'acme', + ) def test_authenticated_json_post_view_if_user_is_incoming_webhook(self) -> None: bot = self.example_user('webhook_bot') @@ -1520,8 +1526,9 @@ class TestAuthenticatedJsonViewDecorator(ZulipTestCase): "Account is not associated with this " "subdomain") mock_warning.assert_called_with( - "User {} ({}) attempted to access API on wrong " - "subdomain ({})".format(email, 'zulip', '')) + "User %s (%s) attempted to access API on wrong subdomain (%s)", + email, 'zulip', '', + ) with mock.patch('logging.warning') as mock_warning, \ 
mock.patch('zerver.decorator.get_subdomain', return_value='acme'): @@ -1529,8 +1536,9 @@ class TestAuthenticatedJsonViewDecorator(ZulipTestCase): "Account is not associated with this " "subdomain") mock_warning.assert_called_with( - "User {} ({}) attempted to access API on wrong " - "subdomain ({})".format(email, 'zulip', 'acme')) + "User %s (%s) attempted to access API on wrong subdomain (%s)", + email, 'zulip', 'acme', + ) def _do_test(self, user_email: str) -> HttpResponse: data = {"password": initial_password(user_email)} diff --git a/zerver/tests/test_email_mirror.py b/zerver/tests/test_email_mirror.py index 71d1682c7b..d5cf2eada7 100644 --- a/zerver/tests/test_email_mirror.py +++ b/zerver/tests/test_email_mirror.py @@ -556,8 +556,10 @@ class TestEmailMirrorMessagesWithAttachments(ZulipTestCase): with mock.patch('zerver.lib.email_mirror.logger.warning') as mock_warn: process_message(incoming_valid_message) - mock_warn.assert_called_with("Payload is not bytes (invalid attachment %s in message from %s)." % - ('some_attachment', self.example_email('hamlet'))) + mock_warn.assert_called_with( + "Payload is not bytes (invalid attachment %s in message from %s).", + 'some_attachment', self.example_email('hamlet'), + ) def test_receive_plaintext_and_html_prefer_text_html_options(self) -> None: user_profile = self.example_user('hamlet') diff --git a/zerver/tests/test_embedded_bot_system.py b/zerver/tests/test_embedded_bot_system.py index ecfd2d2ea2..81b272443b 100644 --- a/zerver/tests/test_embedded_bot_system.py +++ b/zerver/tests/test_embedded_bot_system.py @@ -86,4 +86,6 @@ class TestEmbeddedBotFailures(ZulipTestCase): content="@**{}** foo".format(bot_profile.full_name), topic_name="bar") logging_error_mock.assert_called_once_with( - "Error: User {} has bot with invalid embedded bot service invalid".format(bot_profile.id)) + "Error: User %s has bot with invalid embedded bot service %s", + bot_profile.id, "invalid", + ) diff --git a/zerver/tests/test_external.py b/zerver/tests/test_external.py index f5a1e09139..74b16d4466 100644 --- a/zerver/tests/test_external.py +++ b/zerver/tests/test_external.py @@ -120,5 +120,7 @@ class RateLimitTests(ZulipTestCase): side_effect=RateLimiterLockingException): result = self.send_api_message(user, "some stuff") self.assertEqual(result.status_code, 429) - mock_warn.assert_called_with("Deadlock trying to incr_ratelimit for RateLimitedUser:%s:api_by_user" - % (user.id,)) + mock_warn.assert_called_with( + "Deadlock trying to incr_ratelimit for %s", + "RateLimitedUser:%s:api_by_user" % (user.id,), + ) diff --git a/zerver/tests/test_home.py b/zerver/tests/test_home.py index 4223239cca..be34f86ea0 100644 --- a/zerver/tests/test_home.py +++ b/zerver/tests/test_home.py @@ -460,7 +460,9 @@ class HomeTest(ZulipTestCase): self.login_user(user_profile) with patch('logging.warning') as mock: result = self._get_home_page() - mock.assert_called_once_with('User %s has invalid pointer 999999' % (user_profile.id,)) + mock.assert_called_once_with( + 'User %s has invalid pointer %s', user_profile.id, 999999, + ) self._sanity_check(result) def test_topic_narrow(self) -> None: diff --git a/zerver/tests/test_push_notifications.py b/zerver/tests/test_push_notifications.py index 9e32560f11..0b7b260c19 100644 --- a/zerver/tests/test_push_notifications.py +++ b/zerver/tests/test_push_notifications.py @@ -720,15 +720,17 @@ class HandlePushNotificationTest(PushNotificationTest): self.user_profile.id, token) for _, _, token in gcm_devices: mock_info.assert_any_call( - "GCM: Sent %s as 
%s" % (token, message.id)) + "GCM: Sent %s as %s", token, message.id, + ) # Now test the unregistered case mock_apns.get_notification_result.return_value = ('Unregistered', 1234567) handle_push_notification(self.user_profile.id, missed_message) for _, _, token in apns_devices: mock_info.assert_any_call( - "APNs: Removing invalid/expired token %s (%s)" % - (token, "Unregistered")) + "APNs: Removing invalid/expired token %s (%s)", + token, "Unregistered", + ) self.assertEqual(RemotePushDeviceToken.objects.filter(kind=PushDeviceToken.APNS).count(), 0) def test_connection_error(self) -> None: @@ -978,9 +980,10 @@ class HandlePushNotificationTest(PushNotificationTest): with mock.patch('zerver.lib.push_notifications.logger.error') as mock_logger, \ mock.patch('zerver.lib.push_notifications.push_notifications_enabled', return_value = True) as mock_push_notifications: handle_push_notification(self.user_profile.id, missed_message) - mock_logger.assert_called_with("Could not find UserMessage with " - "message_id %s and user_id %s" % - (message_id, self.user_profile.id,)) + mock_logger.assert_called_with( + "Could not find UserMessage with message_id %s and user_id %s", + message_id, self.user_profile.id, + ) mock_push_notifications.assert_called_once() def test_user_message_soft_deactivated(self) -> None: @@ -1659,8 +1662,8 @@ class GCMSendTest(PushNotificationTest): data = self.get_gcm_data() send_android_push_notification_to_user(self.user_profile, data, {}) self.assertEqual(mock_info.call_count, 2) - c1 = call("GCM: Sent 1111 as 0") - c2 = call("GCM: Sent 2222 as 1") + c1 = call("GCM: Sent %s as %s", "1111", 0) + c2 = call("GCM: Sent %s as %s", "2222", 1) mock_info.assert_has_calls([c1, c2], any_order=True) mock_warning.assert_not_called() @@ -1672,8 +1675,9 @@ class GCMSendTest(PushNotificationTest): data = self.get_gcm_data() send_android_push_notification_to_user(self.user_profile, data, {}) - mock_warning.assert_called_once_with("GCM: Got canonical ref but it " - "already matches our ID 1!") + mock_warning.assert_called_once_with( + "GCM: Got canonical ref but it already matches our ID %s!", 1, + ) @mock.patch('zerver.lib.push_notifications.logger.warning') def test_canonical_pushdevice_not_present(self, mock_warning: mock.MagicMock, @@ -1697,7 +1701,7 @@ class GCMSendTest(PushNotificationTest): msg = ("GCM: Got canonical ref %s " "replacing %s but new ID not " "registered! 
Updating.") - mock_warning.assert_called_once_with(msg % (t2, t1)) + mock_warning.assert_called_once_with(msg, t2, t1) self.assertEqual(get_count('1111'), 0) self.assertEqual(get_count('3333'), 1) @@ -1722,7 +1726,8 @@ class GCMSendTest(PushNotificationTest): data = self.get_gcm_data() send_android_push_notification_to_user(self.user_profile, data, {}) mock_info.assert_called_once_with( - "GCM: Got canonical ref %s, dropping %s" % (new_token, old_token)) + "GCM: Got canonical ref %s, dropping %s", new_token, old_token, + ) self.assertEqual(get_count('1111'), 0) self.assertEqual(get_count('2222'), 1) @@ -1743,7 +1748,7 @@ class GCMSendTest(PushNotificationTest): data = self.get_gcm_data() send_android_push_notification_to_user(self.user_profile, data, {}) - mock_info.assert_called_once_with("GCM: Removing %s" % (token,)) + mock_info.assert_called_once_with("GCM: Removing %s", token) self.assertEqual(get_count('1111'), 0) @mock.patch('zerver.lib.push_notifications.logger.warning') @@ -1755,7 +1760,7 @@ class GCMSendTest(PushNotificationTest): data = self.get_gcm_data() send_android_push_notification_to_user(self.user_profile, data, {}) - c1 = call("GCM: Delivery to %s failed: Failed" % (token,)) + c1 = call("GCM: Delivery to %s failed: %s", token, "Failed") mock_warn.assert_has_calls([c1], any_order=True) class TestClearOnRead(ZulipTestCase): diff --git a/zerver/tests/test_queue_worker.py b/zerver/tests/test_queue_worker.py index 1d7475ff97..b64fef565e 100644 --- a/zerver/tests/test_queue_worker.py +++ b/zerver/tests/test_queue_worker.py @@ -414,9 +414,10 @@ class WorkerTest(ZulipTestCase): fake_client.queue.append(('email_mirror', data[0])) worker.start() self.assertEqual(mock_mirror_email.call_count, 4) - expected_warn = "Deadlock trying to incr_ratelimit for RateLimitedRealmMirror:%s" % ( - realm.string_id,) - mock_warn.assert_called_with(expected_warn) + mock_warn.assert_called_with( + "Deadlock trying to incr_ratelimit for %s", + "RateLimitedRealmMirror:%s" % (realm.string_id,), + ) def test_email_sending_worker_retries(self) -> None: """Tests the retry_send_email_failures decorator to make sure it @@ -501,7 +502,9 @@ class WorkerTest(ZulipTestCase): with patch('logging.warning') as logging_warning_mock: worker.start() logging_warning_mock.assert_called_once_with( - "Attempted to sign up already existing email to list: foo@bar.baz") + "Attempted to sign up already existing email to list: %s", + "foo@bar.baz", + ) def test_signups_bad_request(self) -> None: fake_client = self.FakeClient() diff --git a/zerver/tests/test_signup.py b/zerver/tests/test_signup.py index 98dcbab2b7..5e2755316f 100644 --- a/zerver/tests/test_signup.py +++ b/zerver/tests/test_signup.py @@ -2273,8 +2273,8 @@ class UserSignUpTest(InviteUserBase): self._assert_redirected_to(result, '/config-error/smtp') self.assertEqual( - err.call_args_list[0][0][0], - 'Error in accounts_home: uh oh' + err.call_args_list[0][0], + ('Error in accounts_home: %s', 'uh oh'), ) def test_bad_email_configuration_for_create_realm(self) -> None: @@ -2296,8 +2296,8 @@ class UserSignUpTest(InviteUserBase): self._assert_redirected_to(result, '/config-error/smtp') self.assertEqual( - err.call_args_list[0][0][0], - 'Error in create_realm: uh oh' + err.call_args_list[0][0], + ('Error in create_realm: %s', 'uh oh'), ) def test_user_default_language_and_timezone(self) -> None: @@ -3310,7 +3310,10 @@ class UserSignUpTest(InviteUserBase): # Pass HTTP_HOST for the target subdomain HTTP_HOST=subdomain + ".testserver") 
self.assertEqual(result.status_code, 200) - mock_warning.assert_called_once_with("New account email newuser@zulip.com could not be found in LDAP") + mock_warning.assert_called_once_with( + "New account email %s could not be found in LDAP", + "newuser@zulip.com", + ) result = self.submit_reg_form_for_user(email, password, @@ -3396,7 +3399,10 @@ class UserSignUpTest(InviteUserBase): # Pass HTTP_HOST for the target subdomain HTTP_HOST=subdomain + ".testserver") self.assertEqual(result.status_code, 200) - mock_warning.assert_called_once_with("New account email nonexistent@zulip.com could not be found in LDAP") + mock_warning.assert_called_once_with( + "New account email %s could not be found in LDAP", + "nonexistent@zulip.com", + ) result = self.submit_reg_form_for_user(email, password, diff --git a/zerver/tornado/event_queue.py b/zerver/tornado/event_queue.py index 680099a159..be53b4f00c 100644 --- a/zerver/tornado/event_queue.py +++ b/zerver/tornado/event_queue.py @@ -207,9 +207,9 @@ class ClientDescriptor: clear_descriptor_by_handler_id(self.current_handler_id, None) clear_handler_by_id(self.current_handler_id) if client_closed: - logging.info("Client disconnected for queue %s (%s via %s)" % - (self.event_queue.id, self.user_profile_id, - self.current_client_name)) + logging.info("Client disconnected for queue %s (%s via %s)", + self.event_queue.id, self.user_profile_id, + self.current_client_name) self.current_handler_id = None self.current_client_name = None if self._timeout_handle is not None: @@ -430,10 +430,10 @@ def gc_event_queues(port: int) -> None: do_gc_event_queues(to_remove, affected_users, affected_realms) if settings.PRODUCTION: - logging.info(('Tornado %d removed %d expired event queues owned by %d users in %.3fs.' + - ' Now %d active queues, %s') - % (port, len(to_remove), len(affected_users), time.time() - start, - len(clients), handler_stats_string())) + logging.info('Tornado %d removed %d expired event queues owned by %d users in %.3fs.' + ' Now %d active queues, %s', + port, len(to_remove), len(affected_users), time.time() - start, + len(clients), handler_stats_string()) statsd.gauge('tornado.active_queues', len(clients)) statsd.gauge('tornado.active_users', len(user_clients)) @@ -454,8 +454,8 @@ def dump_event_queues(port: int) -> None: ujson.dump([(qid, client.to_dict()) for (qid, client) in clients.items()], stored_queues) - logging.info('Tornado %d dumped %d event queues in %.3fs' - % (port, len(clients), time.time() - start)) + logging.info('Tornado %d dumped %d event queues in %.3fs', + port, len(clients), time.time() - start) def load_event_queues(port: int) -> None: global clients @@ -481,8 +481,8 @@ def load_event_queues(port: int) -> None: add_to_client_dicts(client) - logging.info('Tornado %d loaded %d event queues in %.3fs' - % (port, len(clients), time.time() - start)) + logging.info('Tornado %d loaded %d event queues in %.3fs', + port, len(clients), time.time() - start) def send_restart_events(immediate: bool=False) -> None: event: Dict[str, Any] = dict(type='restart', server_generation=settings.SERVER_GENERATION) @@ -572,8 +572,8 @@ def fetch_events(query: Mapping[str, Any]) -> Dict[str, Any]: # After this point, dont_block=False, the queue is empty, and we # have a pre-existing queue, so we wait for new events. 
if was_connected: - logging.info("Disconnected handler for queue %s (%s/%s)" % (queue_id, user_profile_id, - client_type_name)) + logging.info("Disconnected handler for queue %s (%s/%s)", + queue_id, user_profile_id, client_type_name) except JsonableError as e: return dict(type="error", exception=e) @@ -608,8 +608,8 @@ def request_event_queue(user_profile: UserProfile, user_client: Client, apply_ma data=req) except requests.adapters.ConnectionError: logging.error('Tornado server does not seem to be running, check %s ' - 'and %s for more information.' % - (settings.ERROR_FILE_LOG_PATH, "tornado.log")) + 'and %s for more information.', + settings.ERROR_FILE_LOG_PATH, "tornado.log") raise requests.adapters.ConnectionError( "Django cannot connect to Tornado server (%s); try restarting" % (tornado_uri,)) @@ -1083,14 +1083,16 @@ def process_notification(notice: Mapping[str, Any]) -> None: process_presence_event(event, cast(Iterable[int], users)) else: process_event(event, cast(Iterable[int], users)) - logging.debug("Tornado: Event %s for %s users took %sms" % ( - event['type'], len(users), int(1000 * (time.time() - start_time)))) + logging.debug( + "Tornado: Event %s for %s users took %sms", + event['type'], len(users), int(1000 * (time.time() - start_time)), + ) def get_wrapped_process_notification(queue_name: str) -> Callable[[Dict[str, Any]], None]: def failure_processor(notice: Dict[str, Any]) -> None: logging.error( - "Maximum retries exceeded for Tornado notice:%s\nStack trace:\n%s\n" % ( - notice, traceback.format_exc())) + "Maximum retries exceeded for Tornado notice:%s\nStack trace:\n%s\n", + notice, traceback.format_exc()) def wrapped_process_notification(notice: Dict[str, Any]) -> None: try: diff --git a/zerver/tornado/ioloop_logging.py b/zerver/tornado/ioloop_logging.py index 19e5f5b88a..497475ddd5 100644 --- a/zerver/tornado/ioloop_logging.py +++ b/zerver/tornado/ioloop_logging.py @@ -68,8 +68,8 @@ class InstrumentedPoll: if total > 0: percent_busy = 100 * (1 - in_poll / total) if settings.PRODUCTION: - logging.info('Tornado %s %5.1f%% busy over the past %4.1f seconds' - % (logging_data.get('port', 'unknown'), percent_busy, total)) + logging.info('Tornado %s %5.1f%% busy over the past %4.1f seconds', + logging_data.get('port', 'unknown'), percent_busy, total) self._last_print = t1 return result diff --git a/zerver/views/auth.py b/zerver/views/auth.py index f68ce4357a..a11dc9a363 100644 --- a/zerver/views/auth.py +++ b/zerver/views/auth.py @@ -461,8 +461,8 @@ def start_social_login(request: HttpRequest, backend: str, extra_arg: Optional[s # This backend requires the name of the IdP (from the list of configured ones) # to be passed as the parameter. 
if not extra_arg or extra_arg not in settings.SOCIAL_AUTH_SAML_ENABLED_IDPS: - logging.info("Attempted to initiate SAML authentication with wrong idp argument: {}" - .format(extra_arg)) + logging.info("Attempted to initiate SAML authentication with wrong idp argument: %s", + extra_arg) return redirect_to_config_error("saml") extra_url_params = {'idp': extra_arg} @@ -485,8 +485,8 @@ def start_social_signup(request: HttpRequest, backend: str, extra_arg: Optional[ return result if not extra_arg or extra_arg not in settings.SOCIAL_AUTH_SAML_ENABLED_IDPS: - logging.info("Attempted to initiate SAML authentication with wrong idp argument: {}" - .format(extra_arg)) + logging.info("Attempted to initiate SAML authentication with wrong idp argument: %s", + extra_arg) return redirect_to_config_error("saml") extra_url_params = {'idp': extra_arg} return oauth_redirect_to_root(request, backend_url, 'social', is_signup=True, @@ -502,13 +502,13 @@ def log_into_subdomain(request: HttpRequest, token: str) -> HttpResponse: result data that has been stored in redis, associated with this token. """ if not has_api_key_format(token): # The tokens are intended to have the same format as API keys. - logging.warning("log_into_subdomain: Malformed token given: %s" % (token,)) + logging.warning("log_into_subdomain: Malformed token given: %s", token) return HttpResponse(status=400) try: result = ExternalAuthResult(login_token=token) except ExternalAuthResult.InvalidTokenError: - logging.warning("log_into_subdomain: Invalid token given: %s" % (token,)) + logging.warning("log_into_subdomain: Invalid token given: %s", token) return render(request, 'zerver/log_into_subdomain_token_invalid.html', status=400) subdomain = get_subdomain(request) diff --git a/zerver/views/home.py b/zerver/views/home.py index c3f37b3f4e..5bce1bbb97 100644 --- a/zerver/views/home.py +++ b/zerver/views/home.py @@ -226,7 +226,7 @@ def home_real(request: HttpRequest) -> HttpResponse: latest_read = get_usermessage_by_message_id(user_profile, user_profile.pointer) if latest_read is None: # Don't completely fail if your saved pointer ID is invalid - logging.warning("User %s has invalid pointer %s" % (user_profile.id, user_profile.pointer)) + logging.warning("User %s has invalid pointer %s", user_profile.id, user_profile.pointer) furthest_read_time = sent_time_in_epoch_seconds(latest_read) # We pick a language for the user as follows: diff --git a/zerver/views/registration.py b/zerver/views/registration.py index d1c40e1229..f785cc68b8 100644 --- a/zerver/views/registration.py +++ b/zerver/views/registration.py @@ -136,7 +136,7 @@ def accounts_register(request: HttpRequest) -> HttpResponse: try: ldap_username = backend.django_to_ldap_username(email) except ZulipLDAPExceptionNoMatchingLDAPUser: - logging.warning("New account email %s could not be found in LDAP" % (email,)) + logging.warning("New account email %s could not be found in LDAP", email) break # Note that this `ldap_user` object is not a @@ -353,8 +353,10 @@ def accounts_register(request: HttpRequest) -> HttpResponse: use_dummy_backend=True) if return_data.get('invalid_subdomain'): # By construction, this should never happen. 
- logging.error("Subdomain mismatch in registration %s: %s" % ( - realm.subdomain, user_profile.delivery_email,)) + logging.error( + "Subdomain mismatch in registration %s: %s", + realm.subdomain, user_profile.delivery_email, + ) return redirect('/') return login_and_go_to_home(request, auth_result) @@ -464,7 +466,7 @@ def create_realm(request: HttpRequest, creation_key: Optional[str]=None) -> Http try: send_confirm_registration_email(email, activation_url, request.LANGUAGE_CODE) except smtplib.SMTPException as e: - logging.error('Error in create_realm: %s' % (str(e),)) + logging.error('Error in create_realm: %s', str(e)) return HttpResponseRedirect("/config-error/smtp") if key_record is not None: @@ -505,7 +507,7 @@ def accounts_home(request: HttpRequest, multiuse_object_key: Optional[str]="", try: send_confirm_registration_email(email, activation_url, request.LANGUAGE_CODE) except smtplib.SMTPException as e: - logging.error('Error in accounts_home: %s' % (str(e),)) + logging.error('Error in accounts_home: %s', str(e)) return HttpResponseRedirect("/config-error/smtp") return HttpResponseRedirect(reverse('signup_send_confirm', kwargs={'email': email})) diff --git a/zerver/views/report.py b/zerver/views/report.py index 956789ee0e..e8bb21b18c 100644 --- a/zerver/views/report.py +++ b/zerver/views/report.py @@ -163,14 +163,15 @@ def report_csp_violations(request: HttpRequest, "Blocked URI('%s'), Original Policy('%s'), " "Violated Directive('%s'), Effective Directive('%s'), " "Disposition('%s'), Referrer('%s'), " - "Status Code('%s'), Script Sample('%s')" % (get_attr('document-uri'), - get_attr('blocked-uri'), - get_attr('original-policy'), - get_attr('violated-directive'), - get_attr('effective-directive'), - get_attr('disposition'), - get_attr('referrer'), - get_attr('status-code'), - get_attr('script-sample'))) + "Status Code('%s'), Script Sample('%s')", + get_attr('document-uri'), + get_attr('blocked-uri'), + get_attr('original-policy'), + get_attr('violated-directive'), + get_attr('effective-directive'), + get_attr('disposition'), + get_attr('referrer'), + get_attr('status-code'), + get_attr('script-sample')) return json_success() diff --git a/zerver/webhooks/teamcity/view.py b/zerver/webhooks/teamcity/view.py index b322741aa0..3afc97e5cd 100644 --- a/zerver/webhooks/teamcity/view.py +++ b/zerver/webhooks/teamcity/view.py @@ -119,7 +119,7 @@ def api_teamcity_webhook(request: HttpRequest, user_profile: UserProfile, if teamcity_user is None: # We can't figure out who started this build - there's nothing we can do here. logging.info("Teamcity webhook couldn't find a matching Zulip user for " - "Teamcity user '%s' or '%s'" % (teamcity_fullname, teamcity_shortname)) + "Teamcity user '%s' or '%s'", teamcity_fullname, teamcity_shortname) return json_success() body = "Your personal build for {}".format(body) diff --git a/zerver/worker/queue_processors.py b/zerver/worker/queue_processors.py index 0022c2d381..f04774163b 100644 --- a/zerver/worker/queue_processors.py +++ b/zerver/worker/queue_processors.py @@ -255,8 +255,10 @@ class SignupWorker(QueueProcessingWorker): def consume(self, data: Dict[str, Any]) -> None: # TODO: This is the only implementation with Dict cf Mapping; should we simplify? 
user_profile = get_user_profile_by_id(data['user_id']) - logging.info("Processing signup for user %s in realm %s" % ( - user_profile.id, user_profile.realm.string_id)) + logging.info( + "Processing signup for user %s in realm %s", + user_profile.id, user_profile.realm.string_id, + ) if settings.MAILCHIMP_API_KEY and settings.PRODUCTION: endpoint = "https://%s.api.mailchimp.com/3.0/lists/%s/members" % \ (settings.MAILCHIMP_API_KEY.split('-')[1], settings.ZULIP_FRIENDS_LIST_ID) @@ -266,8 +268,8 @@ class SignupWorker(QueueProcessingWorker): params['status'] = 'subscribed' r = requests.post(endpoint, auth=('apikey', settings.MAILCHIMP_API_KEY), json=params, timeout=10) if r.status_code == 400 and ujson.loads(r.text)['title'] == 'Member Exists': - logging.warning("Attempted to sign up already existing email to list: %s" % - (data['email_address'],)) + logging.warning("Attempted to sign up already existing email to list: %s", + data['email_address']) elif r.status_code == 400: retry_event(self.queue_name, data, lambda e: r.raise_for_status()) else: @@ -288,7 +290,7 @@ class ConfirmationEmailWorker(QueueProcessingWorker): return referrer = get_user_profile_by_id(data["referrer_id"]) - logger.info("Sending invitation for realm %s to %s" % (referrer.realm.string_id, invitee.email)) + logger.info("Sending invitation for realm %s to %s", referrer.realm.string_id, invitee.email) activate_url = do_send_confirmation_email(invitee, referrer) # queue invitation reminder @@ -378,7 +380,7 @@ class UserActivityIntervalWorker(QueueProcessingWorker): @assign_queue('user_presence') class UserPresenceWorker(QueueProcessingWorker): def consume(self, event: Mapping[str, Any]) -> None: - logging.debug("Received presence event: %s" % (event,),) + logging.debug("Received presence event: %s", event) user_profile = get_user_profile_by_id(event["user_profile_id"]) client = get_client(event["client"]) log_time = timestamp_to_datetime(event["time"]) @@ -406,7 +408,7 @@ class MissedMessageWorker(QueueProcessingWorker): batch_start_by_recipient: Dict[int, float] = {} def consume(self, event: Dict[str, Any]) -> None: - logging.debug("Received missedmessage_emails event: %s" % (event,)) + logging.debug("Received missedmessage_emails event: %s", event) # When we process an event, just put it into the queue and ensure we have a timer going. 
user_profile_id = event['user_profile_id'] @@ -435,8 +437,8 @@ class MissedMessageWorker(QueueProcessingWorker): if current_time - timestamp < self.BATCH_DURATION: continue events = self.events_by_recipient[user_profile_id] - logging.info("Batch-processing %s missedmessage_emails events for user %s" % - (len(events), user_profile_id)) + logging.info("Batch-processing %s missedmessage_emails events for user %s", + len(events), user_profile_id) handle_missedmessage_emails(user_profile_id, events) del self.events_by_recipient[user_profile_id] del self.batch_start_by_recipient[user_profile_id] @@ -481,14 +483,14 @@ class PushNotificationsWorker(QueueProcessingWorker): # nocoverage except PushNotificationBouncerRetryLaterError: def failure_processor(event: Dict[str, Any]) -> None: logger.warning( - "Maximum retries exceeded for trigger:%s event:push_notification" % ( - event['user_profile_id'],)) + "Maximum retries exceeded for trigger:%s event:push_notification", + event['user_profile_id']) retry_event(self.queue_name, event, failure_processor) @assign_queue('error_reports') class ErrorReporter(QueueProcessingWorker): def consume(self, event: Mapping[str, Any]) -> None: - logging.info("Processing traceback with type %s for %s" % (event['type'], event.get('user_email'))) + logging.info("Processing traceback with type %s for %s", event['type'], event.get('user_email')) if settings.ERROR_REPORTING: do_report_error(event['report']['host'], event['type'], event['report']) @@ -501,7 +503,7 @@ class SlowQueryWorker(LoopQueueProcessingWorker): def consume_batch(self, slow_query_events: List[Dict[str, Any]]) -> None: for event in slow_query_events: - logging.info("Slow query: %s" % (event["query"],)) + logging.info("Slow query: %s", event["query"]) if settings.SLOW_QUERY_LOGS_STREAM is None: return @@ -535,7 +537,7 @@ class DigestWorker(QueueProcessingWorker): # nocoverage # Who gets a digest is entirely determined by the enqueue_digest_emails # management command, not here. def consume(self, event: Mapping[str, Any]) -> None: - logging.info("Received digest event: %s" % (event,)) + logging.info("Received digest event: %s", event) handle_digest_email(event["user_profile_id"], event["cutoff"]) @assign_queue('email_mirror') @@ -551,8 +553,8 @@ class MirrorWorker(QueueProcessingWorker): except RateLimited: msg = email.message_from_string(event["message"]) logger.warning("MirrorWorker: Rejecting an email from: %s " - "to realm: %s - rate limited." 
- % (msg['From'], recipient_realm.name)) + "to realm: %s - rate limited.", + msg['From'], recipient_realm.name) return mirror_email(email.message_from_string(event["message"]), @@ -567,7 +569,7 @@ class TestWorker(QueueProcessingWorker): def consume(self, event: Mapping[str, Any]) -> None: # nocoverage fn = settings.ZULIP_WORKER_TEST_FILE message = ujson.dumps(event) - logging.info("TestWorker should append this message to %s: %s" % (fn, message)) + logging.info("TestWorker should append this message to %s: %s", fn, message) with open(fn, 'a') as f: f.write(message + '\n') @@ -635,8 +637,10 @@ class EmbeddedBotWorker(QueueProcessingWorker): for service in services: bot_handler = get_bot_handler(str(service.name)) if bot_handler is None: - logging.error("Error: User %s has bot with invalid embedded bot service %s" % ( - user_profile_id, service.name)) + logging.error( + "Error: User %s has bot with invalid embedded bot service %s", + user_profile_id, service.name, + ) continue try: if hasattr(bot_handler, 'initialize'): @@ -674,8 +678,8 @@ class DeferredWorker(QueueProcessingWorker): except PushNotificationBouncerRetryLaterError: def failure_processor(event: Dict[str, Any]) -> None: logger.warning( - "Maximum retries exceeded for trigger:%s event:clear_push_device_tokens" % ( - event['user_profile_id'],)) + "Maximum retries exceeded for trigger:%s event:clear_push_device_tokens", + event['user_profile_id']) retry_event(self.queue_name, event, failure_processor) elif event['type'] == 'realm_export': start = time.time() @@ -693,8 +697,10 @@ class DeferredWorker(QueueProcessingWorker): failed_timestamp=timezone_now().timestamp() )) export_event.save(update_fields=['extra_data']) - logging.error("Data export for %s failed after %s" % ( - user_profile.realm.string_id, time.time() - start)) + logging.error( + "Data export for %s failed after %s", + user_profile.realm.string_id, time.time() - start, + ) notify_realm_export(user_profile) return @@ -720,5 +726,7 @@ class DeferredWorker(QueueProcessingWorker): # For future frontend use, also notify administrator # clients that the export happened. 
notify_realm_export(user_profile) - logging.info("Completed data export for %s in %s" % ( - user_profile.realm.string_id, time.time() - start)) + logging.info( + "Completed data export for %s in %s", + user_profile.realm.string_id, time.time() - start, + ) diff --git a/zilencer/management/commands/profile_request.py b/zilencer/management/commands/profile_request.py index 7ea60dd90e..b2e1d86df7 100644 --- a/zilencer/management/commands/profile_request.py +++ b/zilencer/management/commands/profile_request.py @@ -45,7 +45,7 @@ def profile_request(request: HttpRequest) -> HttpResponse: with tempfile.NamedTemporaryFile(prefix='profile.data.', delete=False) as stats_file: prof.dump_stats(stats_file.name) request_logger.process_response(request, ret) - logging.info("Profiling data written to {}".format(stats_file.name)) + logging.info("Profiling data written to %s", stats_file.name) return ret class Command(ZulipBaseCommand): diff --git a/zilencer/views.py b/zilencer/views.py index 21f848bee6..7630f60360 100644 --- a/zilencer/views.py +++ b/zilencer/views.py @@ -176,8 +176,10 @@ def batch_create_table_data(server: RemoteZulipServer, model: Any, try: model.objects.bulk_create(row_objects[:BATCH_SIZE]) except IntegrityError: - logging.warning("Invalid data saving %s for server %s/%s" % ( - model._meta.db_table, server.hostname, server.uuid)) + logging.warning( + "Invalid data saving %s for server %s/%s", + model._meta.db_table, server.hostname, server.uuid, + ) raise JsonableError(_("Invalid data.")) row_objects = row_objects[BATCH_SIZE:] diff --git a/zproject/backends.py b/zproject/backends.py index 771af83554..d27baf3947 100644 --- a/zproject/backends.py +++ b/zproject/backends.py @@ -433,7 +433,7 @@ class ZulipLDAPAuthBackendBase(ZulipAuthMixin, LDAPBackend): # This is possible, but strange, so worth logging a warning about. # We can't translate the email to a unique username, # so we don't do anything else here. - logging.warning("Multiple users with email {} found in LDAP.".format(username)) + logging.warning("Multiple users with email %s found in LDAP.", username) result = username if _LDAPUser(self, result).attrs is None: @@ -509,8 +509,8 @@ class ZulipLDAPAuthBackendBase(ZulipAuthMixin, LDAPBackend): user.avatar_hash = user_avatar_content_hash(ldap_avatar) user.save(update_fields=["avatar_hash"]) else: - logging.warning("Could not parse %s field for user %s" % - (avatar_attr_name, user.id)) + logging.warning("Could not parse %s field for user %s", + avatar_attr_name, user.id) def is_account_control_disabled_user(self, ldap_user: _LDAPUser) -> bool: """Implements the userAccountControl check for whether a user has been @@ -761,14 +761,14 @@ class ZulipLDAPUserPopulator(ZulipLDAPAuthBackendBase): user_disabled_in_ldap = self.is_account_control_disabled_user(ldap_user) if user_disabled_in_ldap: if user.is_active: - logging.info("Deactivating user %s because they are disabled in LDAP." % - (user.delivery_email,)) + logging.info("Deactivating user %s because they are disabled in LDAP.", + user.delivery_email) do_deactivate_user(user) # Do an early return to avoid trying to sync additional data. return (user, built) elif not user.is_active: - logging.info("Reactivating user %s because they are not disabled in LDAP." 
% - (user.delivery_email,)) + logging.info("Reactivating user %s because they are not disabled in LDAP.", + user.delivery_email) do_reactivate_user(user) self.sync_avatar_from_ldap(user, ldap_user) @@ -801,10 +801,10 @@ def sync_user_from_ldap(user_profile: UserProfile, logger: logging.Logger) -> bo except ZulipLDAPExceptionNoMatchingLDAPUser: if settings.LDAP_DEACTIVATE_NON_MATCHING_USERS: do_deactivate_user(user_profile) - logger.info("Deactivated non-matching user: %s" % (user_profile.delivery_email,)) + logger.info("Deactivated non-matching user: %s", user_profile.delivery_email) return True elif user_profile.is_active: - logger.warning("Did not find %s in LDAP." % (user_profile.delivery_email,)) + logger.warning("Did not find %s in LDAP.", user_profile.delivery_email) return False # What one would like to do here is just a call to @@ -824,7 +824,7 @@ def sync_user_from_ldap(user_profile: UserProfile, logger: logging.Logger) -> bo # making this flow possible in a more directly supported fashion. updated_user = ZulipLDAPUser(backend, ldap_username, realm=user_profile.realm).populate_user() if updated_user: - logger.info("Updated %s." % (user_profile.delivery_email,)) + logger.info("Updated %s.", user_profile.delivery_email) return True raise PopulateUserLDAPError("populate_user unexpectedly returned {}".format(updated_user)) @@ -1075,8 +1075,8 @@ def social_associate_user_helper(backend: BaseAuth, return_data: Dict[str, Any], if verified_emails_length == 0: # TODO: Provide a nice error message screen to the user # for this case, rather than just logging a warning. - logging.warning("Social auth (%s) failed because user has no verified emails" % - (backend.auth_backend_name,)) + logging.warning("Social auth (%s) failed because user has no verified emails", + backend.auth_backend_name) return_data["email_not_verified"] = True return None @@ -1114,8 +1114,8 @@ def social_associate_user_helper(backend: BaseAuth, return_data: Dict[str, Any], # end up with a wrong email associated with the account. The below code # takes care of that. logging.warning("Social auth (%s) failed because user has no verified" - " emails associated with the account" % - (backend.auth_backend_name,)) + " emails associated with the account", + backend.auth_backend_name) return_data["email_not_associated"] = True return None @@ -1230,7 +1230,7 @@ def social_auth_finish(backend: Any, # In case of invalid email, we will end up on registration page. # This seems better than redirecting to login page. logging.warning( - "{} got invalid email argument.".format(backend.auth_backend_name) + "%s got invalid email argument.", backend.auth_backend_name, ) return None @@ -1504,7 +1504,7 @@ class SAMLAuthBackend(SocialAuthMixin, SAMLAuth): except KeyError as e: # If the above raises KeyError, it means an invalid or missing idp was specified, # we should log that and redirect to the login page.
- logging.info("/login/saml/ : Bad idp param: KeyError: {}.".format(e)) + logging.info("/login/saml/ : Bad idp param: KeyError: %s.", str(e)) return reverse('zerver.views.auth.login_page', kwargs = {'template_name': 'zerver/login.html'}) @@ -1575,14 +1575,14 @@ class SAMLAuthBackend(SocialAuthMixin, SAMLAuth): subdomain = relayed_params.get("subdomain") if idp_name is None or subdomain is None: error_msg = "Missing idp or subdomain value in relayed_params in SAML auth_complete: %s" - logging.info(error_msg % (dict(subdomain=subdomain, idp=idp_name),)) + logging.info(error_msg, dict(subdomain=subdomain, idp=idp_name)) return None idp_valid = self.validate_idp_for_subdomain(idp_name, subdomain) if not idp_valid: error_msg = "User authenticated with IdP %s but this provider is not " + \ "enabled for this realm %s." - logging.info(error_msg % (idp_name, subdomain)) + logging.info(error_msg, idp_name, subdomain) return None result = None diff --git a/zproject/email_backends.py b/zproject/email_backends.py index 0c5257778e..4b86e1c885 100644 --- a/zproject/email_backends.py +++ b/zproject/email_backends.py @@ -94,5 +94,5 @@ class EmailLogBackEnd(BaseEmailBackend): if settings.DEVELOPMENT_LOG_EMAILS: self.log_email(email) email_log_url = settings.ROOT_DOMAIN_URI + "/emails" - logging.info("Emails sent in development are available at %s" % (email_log_url,)) + logging.info("Emails sent in development are available at %s", email_log_url) return len(email_messages)