mirror of https://github.com/zulip/zulip.git

commit bdc365d0fe (parent 82f629091a)

logging: Pass format arguments to logging.

https://docs.python.org/3/howto/logging.html#optimization

Signed-off-by: Anders Kaseorg <anders@zulip.com>
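For context on the rationale: with "%"-formatting or str.format(), the message string is built eagerly, before logging even checks whether the record will be emitted; passing the arguments to the logging call lets the logging module interpolate them lazily, only when a handler actually handles the record (see the optimization link above). A minimal sketch of the difference follows; the logger name and the Expensive class are made up purely for illustration.

    import logging

    logger = logging.getLogger("demo")          # hypothetical logger name
    logging.basicConfig(level=logging.WARNING)  # INFO records will be dropped

    class Expensive:
        def __str__(self) -> str:
            print("expensive __str__ ran")
            return "value"

    # Eager: the full message (and Expensive.__str__) is computed even though
    # the INFO record is below the WARNING threshold and gets discarded.
    logger.info("result: %s" % (Expensive(),))

    # Lazy: the argument is stored on the LogRecord and only interpolated if
    # the record is actually emitted, so __str__ never runs here.
    logger.info("result: %s", Expensive())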
@@ -97,13 +97,13 @@ def process_count_stat(stat: CountStat, fill_to_time: datetime,
 fill_state = FillState.objects.create(property=stat.property,
 end_time=currently_filled,
 state=FillState.DONE)
-logger.info("INITIALIZED %s %s" % (stat.property, currently_filled))
+logger.info("INITIALIZED %s %s", stat.property, currently_filled)
 elif fill_state.state == FillState.STARTED:
-logger.info("UNDO START %s %s" % (stat.property, fill_state.end_time))
+logger.info("UNDO START %s %s", stat.property, fill_state.end_time)
 do_delete_counts_at_hour(stat, fill_state.end_time)
 currently_filled = fill_state.end_time - time_increment
 do_update_fill_state(fill_state, currently_filled, FillState.DONE)
-logger.info("UNDO DONE %s" % (stat.property,))
+logger.info("UNDO DONE %s", stat.property)
 elif fill_state.state == FillState.DONE:
 currently_filled = fill_state.end_time
 else:
@@ -113,21 +113,21 @@ def process_count_stat(stat: CountStat, fill_to_time: datetime,
 for dependency in stat.dependencies:
 dependency_fill_time = last_successful_fill(dependency)
 if dependency_fill_time is None:
-logger.warning("DependentCountStat %s run before dependency %s." %
-               (stat.property, dependency))
+logger.warning("DependentCountStat %s run before dependency %s.",
+               stat.property, dependency)
 return
 fill_to_time = min(fill_to_time, dependency_fill_time)

 currently_filled = currently_filled + time_increment
 while currently_filled <= fill_to_time:
-logger.info("START %s %s" % (stat.property, currently_filled))
+logger.info("START %s %s", stat.property, currently_filled)
 start = time.time()
 do_update_fill_state(fill_state, currently_filled, FillState.STARTED)
 do_fill_count_stat_at_hour(stat, currently_filled, realm)
 do_update_fill_state(fill_state, currently_filled, FillState.DONE)
 end = time.time()
 currently_filled = currently_filled + time_increment
-logger.info("DONE %s (%dms)" % (stat.property, (end-start)*1000))
+logger.info("DONE %s (%dms)", stat.property, (end-start)*1000)

 def do_update_fill_state(fill_state: FillState, end_time: datetime, state: int) -> None:
 fill_state.end_time = end_time
@@ -142,8 +142,8 @@ def do_fill_count_stat_at_hour(stat: CountStat, end_time: datetime, realm: Optio
 timer = time.time()
 assert(stat.data_collector.pull_function is not None)
 rows_added = stat.data_collector.pull_function(stat.property, start_time, end_time, realm)
-logger.info("%s run pull_function (%dms/%sr)" %
-            (stat.property, (time.time()-timer)*1000, rows_added))
+logger.info("%s run pull_function (%dms/%sr)",
+            stat.property, (time.time()-timer)*1000, rows_added)
 do_aggregate_to_summary_table(stat, end_time, realm)

 def do_delete_counts_at_hour(stat: CountStat, end_time: datetime) -> None:
@@ -190,8 +190,10 @@ def do_aggregate_to_summary_table(stat: CountStat, end_time: datetime,
 start = time.time()
 cursor.execute(realmcount_query, {'end_time': end_time})
 end = time.time()
-logger.info("%s RealmCount aggregation (%dms/%sr)" % (
-    stat.property, (end - start) * 1000, cursor.rowcount))
+logger.info(
+    "%s RealmCount aggregation (%dms/%sr)",
+    stat.property, (end - start) * 1000, cursor.rowcount,
+)

 if realm is None:
 # Aggregate into InstallationCount. Only run if we just
@@ -213,8 +215,10 @@ def do_aggregate_to_summary_table(stat: CountStat, end_time: datetime,
 start = time.time()
 cursor.execute(installationcount_query, {'end_time': end_time})
 end = time.time()
-logger.info("%s InstallationCount aggregation (%dms/%sr)" % (
-    stat.property, (end - start) * 1000, cursor.rowcount))
+logger.info(
+    "%s InstallationCount aggregation (%dms/%sr)",
+    stat.property, (end - start) * 1000, cursor.rowcount,
+)

 cursor.close()

@@ -43,7 +43,7 @@ def compute_stats(log_level: int) -> None:
 total_counts[client_name] += count
 total_user_counts[email] += count

-logging.debug("%40s | %10s | %s" % ("User", "Messages", "Percentage Zulip"))
+logging.debug("%40s | %10s | %s", "User", "Messages", "Percentage Zulip")
 top_percents: Dict[int, float] = {}
 for size in [10, 25, 50, 100, 200, len(total_user_counts.keys())]:
 top_percents[size] = 0.0
@@ -56,18 +56,18 @@ def compute_stats(log_level: int) -> None:
 if i < size:
 top_percents[size] += (percent_zulip * 1.0 / size)

-logging.debug("%40s | %10s | %s%%" % (email, total_user_counts[email],
-              percent_zulip))
+logging.debug("%40s | %10s | %s%%", email, total_user_counts[email],
+              percent_zulip)

 logging.info("")
 for size in sorted(top_percents.keys()):
-logging.info("Top %6s | %s%%" % (size, round(top_percents[size], 1)))
+logging.info("Top %6s | %s%%", size, round(top_percents[size], 1))

 grand_total = sum(total_counts.values())
 print(grand_total)
-logging.info("%15s | %s" % ("Client", "Percentage"))
+logging.info("%15s | %s", "Client", "Percentage")
 for client in total_counts.keys():
-logging.info("%15s | %s%%" % (client, round(100. * total_counts[client] / grand_total, 1)))
+logging.info("%15s | %s%%", client, round(100. * total_counts[client] / grand_total, 1))

 class Command(BaseCommand):
 help = "Compute statistics on MIT Zephyr usage."
@@ -258,7 +258,7 @@ def get_chart_data(request: HttpRequest, user_profile: UserProfile, chart_name:
 logging.warning("User from realm %s attempted to access /stats, but the computed "
 "start time: %s (creation of realm or installation) is later than the computed "
 "end time: %s (last successful analytics update). Is the "
-"analytics cron job running?" % (realm.string_id, start, end))
+"analytics cron job running?", realm.string_id, start, end)
 raise JsonableError(_("No analytics data available. Please contact your server administrator."))

 assert len({stat.frequency for stat in stats}) == 1
@@ -211,7 +211,7 @@ def try_to_copy_venv(venv_path: str, new_packages: Set[str]) -> bool:
 except subprocess.CalledProcessError:
 # Virtualenv-clone is either not installed or threw an
 # error. Just return False: making a new venv is safe.
-logging.warning("Error cloning virtualenv %s" % (source_venv_path,))
+logging.warning("Error cloning virtualenv %s", source_venv_path)
 return False

 # virtualenv-clone, unfortunately, copies the success stamp,
@@ -210,7 +210,7 @@ def log_management_command(cmd: str, log_path: str) -> None:
 logger.addHandler(file_handler)
 logger.setLevel(logging.INFO)

-logger.info("Ran '%s'" % (cmd,))
+logger.info("Ran '%s'", cmd)

 def get_environment() -> str:
 if os.path.exists(DEPLOYMENTS_DIR):
@@ -95,7 +95,7 @@ elif vendor == "rhel" and os_version.startswith("7."):
 elif vendor == "centos" and os_version == "7":
 POSTGRES_VERSION = "10"
 else:
-logging.critical("Unsupported platform: {} {}".format(vendor, os_version))
+logging.critical("Unsupported platform: %s %s", vendor, os_version)
 if vendor == 'ubuntu' and os_version == '14.04':
 print()
 print("Ubuntu Trusty reached end-of-life upstream and is no longer a supported platform for Zulip")
@@ -49,3 +49,29 @@ rules:
 - path-not: 'zerver/migrations/0209_user_profile_no_empty_password.py'
 - path-not: 'zerver/migrations/0260_missed_message_addresses_from_redis_to_db.py'
 - path-not: 'pgroonga/migrations/0002_html_escape_subject.py'

+- id: logging-format
+  languages: [python]
+  pattern-either:
+  - pattern: logging.debug(... % ...)
+  - pattern: logging.debug(... .format(...))
+  - pattern: logger.debug(... % ...)
+  - pattern: logger.debug(... .format(...))
+  - pattern: logging.info(... % ...)
+  - pattern: logging.info(... .format(...))
+  - pattern: logger.info(... % ...)
+  - pattern: logger.info(... .format(...))
+  - pattern: logging.warning(... % ...)
+  - pattern: logging.warning(... .format(...))
+  - pattern: logger.warning(... % ...)
+  - pattern: logger.warning(... .format(...))
+  - pattern: logging.error(... % ...)
+  - pattern: logging.error(... .format(...))
+  - pattern: logger.error(... % ...)
+  - pattern: logger.error(... .format(...))
+  - pattern: logging.critical(... % ...)
+  - pattern: logging.critical(... .format(...))
+  - pattern: logger.critical(... % ...)
+  - pattern: logger.critical(... .format(...))
+  severity: ERROR
+  message: "Pass format arguments to logging (https://docs.python.org/3/howto/logging.html#optimization)"
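The new semgrep rule above simply enumerates logging and logger call sites that still format eagerly with "%" or .format() inside the call. A rough sketch of what it would flag versus accept, using hypothetical variable names:

    import logging

    logger = logging.getLogger(__name__)
    user_id = 42  # hypothetical value for illustration

    logger.info("processing user %s" % (user_id,))         # flagged: "% ..." inside the call
    logger.warning("processing user {}".format(user_id))   # flagged: ".format(...)" inside the call
    logger.info("processing user %s", user_id)              # accepted: lazy format arguments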
@@ -199,7 +199,7 @@ def convert_gitter_workspace_messages(gitter_data: GitterDataT, output_dir: str,
 message_json['zerver_message'] = zerver_message
 message_json['zerver_usermessage'] = zerver_usermessage
 message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
-logging.info("Writing Messages to %s\n" % (message_filename,))
+logging.info("Writing Messages to %s\n", message_filename)
 write_data_to_file(os.path.join(message_filename), message_json)

 low_index = upper_index
@@ -216,8 +216,8 @@ def get_usermentions(message: Dict[str, Any], user_map: Dict[str, int],
 if mention.get('userId') in user_map:
 gitter_mention = '@%s' % (mention['screenName'],)
 if mention['screenName'] not in user_short_name_to_full_name:
-logging.info("Mentioned user %s never sent any messages, so has no full name data" %
-             (mention['screenName'],))
+logging.info("Mentioned user %s never sent any messages, so has no full name data",
+             mention['screenName'])
 full_name = mention['screenName']
 else:
 full_name = user_short_name_to_full_name[mention['screenName']]
@@ -278,7 +278,7 @@ def do_convert_data(gitter_data_file: str, output_dir: str, threads: int=6) -> N
 subprocess.check_call(["tar", "-czf", output_dir + '.tar.gz', output_dir, '-P'])

 logging.info('######### DATA CONVERSION FINISHED #########\n')
-logging.info("Zulip data dump created at %s" % (output_dir,))
+logging.info("Zulip data dump created at %s", output_dir)

 def write_data_to_file(output_file: str, data: Any) -> None:
 with open(output_file, "w") as f:
@@ -70,14 +70,14 @@ def untar_input_file(tar_file: str) -> str:
 data_dir = os.path.abspath(data_dir)

 if os.path.exists(data_dir):
-logging.info('input data was already untarred to %s, we will use it' % (data_dir,))
+logging.info('input data was already untarred to %s, we will use it', data_dir)
 return data_dir

 os.makedirs(data_dir)

 subprocess.check_call(['tar', '-xf', tar_file, '-C', data_dir])

-logging.info('input data was untarred to %s' % (data_dir,))
+logging.info('input data was untarred to %s', data_dir)

 return data_dir

@@ -696,7 +696,7 @@ def process_raw_message_batch(realm_id: int,
 content = h.handle(content)

 if len(content) > 10000:
-logging.info('skipping too-long message of length %s' % (len(content),))
+logging.info('skipping too-long message of length %s', len(content))
 continue

 date_sent = raw_message['date_sent']
@@ -572,7 +572,7 @@ def write_avatar_png(avatar_folder: str,
 ListJobData = TypeVar('ListJobData')
 def run_parallel_wrapper(f: Callable[[ListJobData], None], full_items: List[ListJobData],
                          threads: int=6) -> Iterable[Tuple[int, List[ListJobData]]]:
-logging.info("Distributing %s items across %s threads" % (len(full_items), threads))
+logging.info("Distributing %s items across %s threads", len(full_items), threads)

 def wrapping_function(items: List[ListJobData]) -> int:
 count = 0
@@ -580,11 +580,11 @@ def run_parallel_wrapper(f: Callable[[ListJobData], None], full_items: List[List
 try:
 f(item)
 except Exception:
-logging.info("Error processing item: %s" % (item,))
+logging.info("Error processing item: %s", item)
 traceback.print_exc()
 count += 1
 if count % 1000 == 0:
-logging.info("A download thread finished %s items" % (count,))
+logging.info("A download thread finished %s items", count)
 return 0
 job_lists: List[List[ListJobData]] = [full_items[i::threads] for i in range(threads)]
 return run_parallel(wrapping_function, job_lists, threads=threads)
@@ -330,7 +330,7 @@ def process_raw_message_batch(realm_id: int,
 content = h.handle(content)

 if len(content) > 10000:  # nocoverage
-logging.info('skipping too-long message of length %s' % (len(content),))
+logging.info('skipping too-long message of length %s', len(content))
 continue

 date_sent = raw_message['date_sent']
@@ -205,7 +205,7 @@ def users_to_zerver_userprofile(slack_data_dir: str, users: List[ZerverFieldsT],
 if not user.get('is_primary_owner', False):
 user_id_count += 1

-logging.info("{} -> {}".format(user['name'], userprofile_dict['email']))
+logging.info("%s -> %s", user['name'], userprofile_dict['email'])

 process_customprofilefields(zerver_customprofilefield, zerver_customprofilefield_values)
 logging.info('######### IMPORTING USERS FINISHED #########\n')
@@ -397,7 +397,7 @@ def channels_to_zerver_stream(slack_data_dir: str, realm_id: int,

 stream_id_count += 1
 recipient_id_count += 1
-logging.info("{} -> created".format(channel['name']))
+logging.info("%s -> created", channel['name'])

 # TODO map Slack's pins to Zulip's stars
 # There is the security model that Slack's pins are known to the team owner
@@ -443,7 +443,7 @@ def channels_to_zerver_stream(slack_data_dir: str, realm_id: int,

 huddle_id_count += 1
 recipient_id_count += 1
-logging.info("{} -> created".format(mpim['name']))
+logging.info("%s -> created", mpim['name'])

 try:
 mpims = get_data_file(slack_data_dir + '/mpims.json')
@@ -595,7 +595,7 @@ def convert_slack_workspace_messages(slack_data_dir: str, users: List[ZerverFiel
 zerver_usermessage=zerver_usermessage)

 message_file = "/messages-%06d.json" % (dump_file_id,)
-logging.info("Writing Messages to %s\n" % (output_dir + message_file,))
+logging.info("Writing Messages to %s\n", output_dir + message_file)
 create_converted_data_files(message_json, output_dir, message_file)

 total_reactions += reactions
@@ -796,8 +796,10 @@ def channel_message_to_zerver_message(realm_id: int,
 total_user_messages += num_created
 total_skipped_user_messages += num_skipped

-logging.debug("Created %s UserMessages; deferred %s due to long-term idle" % (
-    total_user_messages, total_skipped_user_messages))
+logging.debug(
+    "Created %s UserMessages; deferred %s due to long-term idle",
+    total_user_messages, total_skipped_user_messages,
+)
 return zerver_message, zerver_usermessage, zerver_attachment, uploads_list, \
     reaction_list

@@ -1091,7 +1093,7 @@ def do_convert_data(slack_zip_file: str, output_dir: str, token: str, threads: i
 subprocess.check_call(["tar", "-czf", output_dir + '.tar.gz', output_dir, '-P'])

 logging.info('######### DATA CONVERSION FINISHED #########\n')
-logging.info("Zulip data dump created at %s" % (output_dir,))
+logging.info("Zulip data dump created at %s", output_dir)

 def get_data_file(path: str) -> Any:
 with open(path) as fp:
@@ -224,8 +224,10 @@ def validate_account_and_subdomain(request: HttpRequest, user_profile: UserProfi
 not (settings.RUNNING_INSIDE_TORNADO and
 request.META["SERVER_NAME"] == "127.0.0.1" and
 request.META["REMOTE_ADDR"] == "127.0.0.1")):
-logging.warning("User %s (%s) attempted to access API on wrong subdomain (%s)" % (
-    user_profile.delivery_email, user_profile.realm.subdomain, get_subdomain(request)))
+logging.warning(
+    "User %s (%s) attempted to access API on wrong subdomain (%s)",
+    user_profile.delivery_email, user_profile.realm.subdomain, get_subdomain(request),
+)
 raise JsonableError(_("Account is not associated with this subdomain"))

 def access_user_by_api_key(request: HttpRequest, api_key: str, email: Optional[str]=None) -> UserProfile:
@@ -772,8 +774,8 @@ def rate_limit(domain: str='api_by_user') -> Callable[[ViewFuncT], ViewFuncT]:
 user = None

 if not user:  # nocoverage # See comments below
-logging.error("Requested rate-limiting on %s but user is not authenticated!" %
-              (func.__name__,))
+logging.error("Requested rate-limiting on %s but user is not authenticated!",
+              func.__name__)
 return func(request, *args, **kwargs)

 if isinstance(user, AnonymousUser):  # nocoverage
@@ -244,7 +244,7 @@ class ZulipPasswordResetForm(PasswordResetForm):
 realm = get_realm(get_subdomain(request))

 if not email_auth_enabled(realm):
-logging.info("Password reset attempted for %s even though password auth is disabled." % (email,))
+logging.info("Password reset attempted for %s even though password auth is disabled.", email)
 return
 if email_belongs_to_ldap(realm, email):
 # TODO: Ideally, we'd provide a user-facing error here
@@ -261,7 +261,7 @@ class ZulipPasswordResetForm(PasswordResetForm):
 rate_limit_password_reset_form_by_email(email)
 except RateLimited:
 # TODO: Show an informative, user-facing error message.
-logging.info("Too many password reset attempts for email %s" % (email,))
+logging.info("Too many password reset attempts for email %s", email)
 return

 user: Optional[UserProfile] = None
@@ -329,8 +329,8 @@ class OurAuthenticationForm(AuthenticationForm):
 try:
 realm = get_realm(subdomain)
 except Realm.DoesNotExist:
-logging.warning("User %s attempted to password login to nonexistent subdomain %s" %
-                (username, subdomain))
+logging.warning("User %s attempted to password login to nonexistent subdomain %s",
+                username, subdomain)
 raise ValidationError("Realm does not exist")

 return_data: Dict[str, Any] = {}
@@ -351,8 +351,8 @@ class OurAuthenticationForm(AuthenticationForm):
 raise ValidationError(mark_safe(DEACTIVATED_ACCOUNT_ERROR))

 if return_data.get("invalid_subdomain"):
-logging.warning("User %s attempted to password login to wrong subdomain %s" %
-                (username, subdomain))
+logging.warning("User %s attempted to password login to wrong subdomain %s",
+                username, subdomain)
 raise ValidationError(mark_safe(WRONG_SUBDOMAIN_ERROR))

 if self.user_cache is None:
@@ -1219,8 +1219,9 @@ def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[i
 queue_name = 'embedded_bots'
 else:
 logging.error(
-    'Unexpected bot_type for Service bot id=%s: %s' %
-    (user_profile_id, bot_type))
+    'Unexpected bot_type for Service bot id=%s: %s',
+    user_profile_id, bot_type,
+)
 return

 is_stream = (recipient_type == Recipient.STREAM)
@@ -5420,8 +5421,10 @@ def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> boo
 #
 # Either case is unusual and suggests a UI bug that got
 # the user in this situation, so we log in these cases.
-logging.warning("User %s tried to share upload %s in message %s, but lacks permission" % (
-    user_profile.id, path_id, message.id))
+logging.warning(
+    "User %s tried to share upload %s in message %s, but lacks permission",
+    user_profile.id, path_id, message.id,
+)
 continue

 claimed = True
@@ -198,7 +198,7 @@ class InvalidCacheKeyException(Exception):

 def log_invalid_cache_keys(stack_trace: str, key: List[str]) -> None:
 logger.warning(
-    "Invalid cache key used: {}\nStack trace: {}\n".format(key, stack_trace)
+    "Invalid cache key used: %s\nStack trace: %s\n", key, stack_trace,
 )

 def validate_cache_key(key: str) -> None:
@@ -133,6 +133,6 @@ def fill_remote_cache(cache: str) -> None:
 cache_set_many(items_for_remote_cache, timeout=3600*24)
 items_for_remote_cache = {}
 cache_set_many(items_for_remote_cache, timeout=3600*24*7)
-logging.info("Successfully populated %s cache! Consumed %s remote cache queries (%s time)" %
-             (cache, get_remote_cache_requests() - remote_cache_requests_start,
-              round(get_remote_cache_time() - remote_cache_time_start, 2)))
+logging.info("Successfully populated %s cache! Consumed %s remote cache queries (%s time)",
+             cache, get_remote_cache_requests() - remote_cache_requests_start,
+             round(get_remote_cache_time() - remote_cache_time_start, 2))
@@ -40,8 +40,8 @@ def interactive_debug_listen() -> None:

 def tracemalloc_dump() -> None:
 if not tracemalloc.is_tracing():
-logger.warning("pid {}: tracemalloc off, nothing to dump"
-               .format(os.getpid()))
+logger.warning("pid %s: tracemalloc off, nothing to dump",
+               os.getpid())
 return
 # Despite our name for it, `timezone_now` always deals in UTC.
 basename = "snap.{}.{}".format(os.getpid(),
@@ -55,15 +55,15 @@ def tracemalloc_dump() -> None:
 with open('/proc/{}/stat'.format(os.getpid()), 'rb') as f:
 procstat = f.read().split()
 rss_pages = int(procstat[23])
-logger.info("tracemalloc dump: tracing {} MiB ({} MiB peak), using {} MiB; rss {} MiB; dumped {}"
-            .format(tracemalloc.get_traced_memory()[0] // 1048576,
+logger.info("tracemalloc dump: tracing %s MiB (%s MiB peak), using %s MiB; rss %s MiB; dumped %s",
+            tracemalloc.get_traced_memory()[0] // 1048576,
             tracemalloc.get_traced_memory()[1] // 1048576,
             tracemalloc.get_tracemalloc_memory() // 1048576,
             rss_pages // 256,
-            basename))
+            basename)

 def tracemalloc_listen_sock(sock: socket.socket) -> None:
-logger.debug('pid {}: tracemalloc_listen_sock started!'.format(os.getpid()))
+logger.debug('pid %s: tracemalloc_listen_sock started!', os.getpid())
 while True:
 sock.recv(1)
 tracemalloc_dump()
@@ -75,7 +75,7 @@ def tracemalloc_listen() -> None:
 if listener_pid == os.getpid():
 # Already set up -- and in this process, not just its parent.
 return
-logger.debug('pid {}: tracemalloc_listen working...'.format(os.getpid()))
+logger.debug('pid %s: tracemalloc_listen working...', os.getpid())
 listener_pid = os.getpid()

 sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
@@ -84,8 +84,7 @@ def tracemalloc_listen() -> None:
 thread = threading.Thread(target=lambda: tracemalloc_listen_sock(sock),
                           daemon=True)
 thread.start()
-logger.debug('pid {}: tracemalloc_listen done: {}'.format(
-    os.getpid(), path))
+logger.debug('pid %s: tracemalloc_listen done: %s', os.getpid(), path)

 def maybe_tracemalloc_listen() -> None:
 '''If tracemalloc tracing enabled, listen for requests to dump a snapshot.
@@ -70,8 +70,10 @@ def enqueue_emails(cutoff: datetime.datetime) -> None:
 for user_profile in user_profiles:
 if inactive_since(user_profile, cutoff):
 queue_digest_recipient(user_profile, cutoff)
-logger.info("User %s is inactive, queuing for potential digest" % (
-    user_profile.id,))
+logger.info(
+    "User %s is inactive, queuing for potential digest",
+    user_profile.id,
+)

 def gather_hot_conversations(user_profile: UserProfile, messages: List[Message]) -> List[Dict[str, Any]]:
 # Gather stream conversations of 2 types:
@@ -209,7 +211,7 @@ def handle_digest_email(user_profile_id: int, cutoff: float,

 # We don't want to send emails containing almost no information.
 if enough_traffic(context["hot_conversations"], new_streams_count):
-logger.info("Sending digest email for user %s" % (user_profile.id,))
+logger.info("Sending digest email for user %s", user_profile.id)
 # Send now, as a ScheduledEmail
 send_future_email('zerver/emails/digest', user_profile.realm, to_user_ids=[user_profile.id],
                   from_name="Zulip Digest", from_address=FromAddress.no_reply_placeholder,
@@ -200,7 +200,7 @@ def extract_body(message: message.Message, include_quotes: bool=False, prefer_te
 html_content = extract_html_body(message, include_quotes)

 if plaintext_content is None and html_content is None:
-logging.warning("Content types: %s" % ([part.get_content_type() for part in message.walk()],))
+logging.warning("Content types: %s", [part.get_content_type() for part in message.walk()])
 raise ZulipEmailForwardUserError("Unable to find plaintext or HTML message body")
 if not plaintext_content and not html_content:
 raise ZulipEmailForwardUserError("Email has no nonempty body sections; ignoring.")
@@ -282,8 +282,8 @@ def extract_and_upload_attachments(message: message.Message, realm: Realm) -> st
 formatted_link = "[%s](%s)" % (filename, s3_url)
 attachment_links.append(formatted_link)
 else:
-logger.warning("Payload is not bytes (invalid attachment %s in message from %s)." %
-               (filename, message.get("From")))
+logger.warning("Payload is not bytes (invalid attachment %s in message from %s).",
+               filename, message.get("From"))

 return '\n'.join(attachment_links)

@@ -343,8 +343,10 @@ def process_stream_message(to: str, message: message.Message) -> None:
 send_zulip(
 get_system_bot(settings.EMAIL_GATEWAY_BOT),
 stream, subject, body)
-logger.info("Successfully processed email to %s (%s)" % (
-    stream.name, stream.realm.string_id))
+logger.info(
+    "Successfully processed email to %s (%s)",
+    stream.name, stream.realm.string_id,
+)

 def process_missed_message(to: str, message: message.Message) -> None:
 mm_address = get_usable_missed_message_address(to)
@@ -389,8 +391,10 @@ def process_missed_message(to: str, message: message.Message) -> None:
 else:
 raise AssertionError("Invalid recipient type!")

-logger.info("Successfully processed email from user %s to %s" % (
-    user_profile.id, recipient_str))
+logger.info(
+    "Successfully processed email from user %s to %s",
+    user_profile.id, recipient_str,
+)

 def process_message(message: message.Message, rcpt_to: Optional[str]=None) -> None:
 to: Optional[str] = None
@@ -301,7 +301,7 @@ def sanity_check_output(data: TableData) -> None:

 for table in tables:
 if table not in data:
-logging.warning('??? NO DATA EXPORTED FOR TABLE %s!!!' % (table,))
+logging.warning('??? NO DATA EXPORTED FOR TABLE %s!!!', table)

 def write_data_to_file(output_file: Path, data: Any) -> None:
 with open(output_file, "w") as f:
@@ -451,7 +451,7 @@ def export_from_config(response: TableData, config: Config, seed_object: Optiona
 exported_tables = config.custom_tables

 for t in exported_tables:
-logging.info('Exporting via export_from_config: %s' % (t,))
+logging.info('Exporting via export_from_config: %s', t)

 rows = None
 if config.is_seeded:
@@ -476,7 +476,7 @@ def export_from_config(response: TableData, config: Config, seed_object: Optiona
 for t in config.concat_and_destroy:
 data += response[t]
 del response[t]
-logging.info('Deleted temporary %s' % (t,))
+logging.info('Deleted temporary %s', t)
 assert table is not None
 response[table] = data

@@ -945,7 +945,7 @@ def fetch_usermessages(realm: Realm,
 user_message_obj['flags_mask'] = user_message.flags.mask
 del user_message_obj['flags']
 user_message_chunk.append(user_message_obj)
-logging.info("Fetched UserMessages for %s" % (message_filename,))
+logging.info("Fetched UserMessages for %s", message_filename)
 return user_message_chunk

 def export_usermessages_batch(input_path: Path, output_path: Path,
@@ -968,7 +968,7 @@ def export_usermessages_batch(input_path: Path, output_path: Path,

 def write_message_export(message_filename: Path, output: MessageOutput) -> None:
 write_data_to_file(output_file=message_filename, data=output)
-logging.info("Dumped to %s" % (message_filename,))
+logging.info("Dumped to %s", message_filename)

 def export_partial_message_files(realm: Realm,
                                  response: TableData,
@@ -1107,7 +1107,7 @@ def write_message_partial_for_query(realm: Realm, message_query: Any, dump_file_
 # Figure out the name of our shard file.
 message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
 message_filename += '.partial'
-logging.info("Fetched Messages for %s" % (message_filename,))
+logging.info("Fetched Messages for %s", message_filename)

 # Clean up our messages.
 table_data: TableData = {}
@@ -1251,7 +1251,7 @@ def export_files_from_s3(realm: Realm, bucket_name: str, output_dir: Path,
 bucket = conn.get_bucket(bucket_name, validate=True)
 records = []

-logging.info("Downloading uploaded files from %s" % (bucket_name,))
+logging.info("Downloading uploaded files from %s", bucket_name)

 avatar_hash_values = set()
 user_ids = set()
@@ -1293,7 +1293,7 @@ def export_files_from_s3(realm: Realm, bucket_name: str, output_dir: Path,
 count += 1

 if (count % 100 == 0):
-logging.info("Finished %s" % (count,))
+logging.info("Finished %s", count)

 with open(os.path.join(output_dir, "records.json"), "w") as records_file:
 ujson.dump(records, records_file, indent=4)
@@ -1321,7 +1321,7 @@ def export_uploads_from_local(realm: Realm, local_dir: Path, output_dir: Path) -
 count += 1

 if (count % 100 == 0):
-logging.info("Finished %s" % (count,))
+logging.info("Finished %s", count)
 with open(os.path.join(output_dir, "records.json"), "w") as records_file:
 ujson.dump(records, records_file, indent=4)

@@ -1344,8 +1344,10 @@ def export_avatars_from_local(realm: Realm, local_dir: Path, output_dir: Path) -
 wildcard = os.path.join(local_dir, avatar_path + '.*')

 for local_path in glob.glob(wildcard):
-logging.info('Copying avatar file for user %s from %s' % (
-    user.email, local_path))
+logging.info(
+    'Copying avatar file for user %s from %s',
+    user.email, local_path,
+)
 fn = os.path.relpath(local_path, local_dir)
 output_path = os.path.join(output_dir, fn)
 os.makedirs(str(os.path.dirname(output_path)), exist_ok=True)
@@ -1364,7 +1366,7 @@ def export_avatars_from_local(realm: Realm, local_dir: Path, output_dir: Path) -
 count += 1

 if (count % 100 == 0):
-logging.info("Finished %s" % (count,))
+logging.info("Finished %s", count)

 with open(os.path.join(output_dir, "records.json"), "w") as records_file:
 ujson.dump(records, records_file, indent=4)
@@ -1416,7 +1418,7 @@ def export_emoji_from_local(realm: Realm, local_dir: Path, output_dir: Path) ->

 count += 1
 if (count % 100 == 0):
-logging.info("Finished %s" % (count,))
+logging.info("Finished %s", count)
 with open(os.path.join(output_dir, "records.json"), "w") as records_file:
 ujson.dump(records, records_file, indent=4)

@@ -1428,7 +1430,7 @@ def do_write_stats_file_for_realm_export(output_dir: Path) -> None:
 message_files = glob.glob(os.path.join(output_dir, 'messages-*.json'))
 fns = sorted([analytics_file] + [attachment_file] + message_files + [realm_file])

-logging.info('Writing stats file: %s\n' % (stats_file,))
+logging.info('Writing stats file: %s\n', stats_file)
 with open(stats_file, 'w') as f:
 for fn in fns:
 f.write(os.path.basename(fn) + '\n')
@@ -1487,7 +1489,7 @@ def do_export_realm(realm: Realm, output_dir: Path, threads: int,
 message_ids = export_partial_message_files(realm, response, output_dir=output_dir,
                                            public_only=public_only,
                                            consent_message_id=consent_message_id)
-logging.info('%d messages were exported' % (len(message_ids),))
+logging.info('%d messages were exported', len(message_ids))

 # zerver_reaction
 zerver_reaction: TableData = {}
@@ -1497,7 +1499,7 @@ def do_export_realm(realm: Realm, output_dir: Path, threads: int,
 # Write realm data
 export_file = os.path.join(output_dir, "realm.json")
 write_data_to_file(output_file=export_file, data=response)
-logging.info('Writing realm data to %s' % (export_file,))
+logging.info('Writing realm data to %s', export_file)

 # Write analytics data
 export_analytics_tables(realm=realm, output_dir=output_dir)
@@ -1509,7 +1511,7 @@ def do_export_realm(realm: Realm, output_dir: Path, threads: int,
 launch_user_message_subprocesses(threads=threads, output_dir=output_dir,
                                  consent_message_id=consent_message_id)

-logging.info("Finished exporting %s" % (realm.string_id,))
+logging.info("Finished exporting %s", realm.string_id)
 create_soft_link(source=output_dir, in_progress=False)

 do_write_stats_file_for_realm_export(output_dir)
@@ -1528,7 +1530,7 @@ def export_attachment_table(realm: Realm, output_dir: Path, message_ids: Set[int
 response: TableData = {}
 fetch_attachment_data(response=response, realm_id=realm.id, message_ids=message_ids)
 output_file = os.path.join(output_dir, "attachment.json")
-logging.info('Writing attachment table data to %s' % (output_file,))
+logging.info('Writing attachment table data to %s', output_file)
 write_data_to_file(output_file=output_file, data=response)

 def create_soft_link(source: Path, in_progress: bool=True) -> None:
@@ -1551,11 +1553,11 @@ def create_soft_link(source: Path, in_progress: bool=True) -> None:

 overwrite_symlink(source, new_target)
 if is_done:
-logging.info('See %s for output files' % (new_target,))
+logging.info('See %s for output files', new_target)

 def launch_user_message_subprocesses(threads: int, output_dir: Path,
                                      consent_message_id: Optional[int]=None) -> None:
-logging.info('Launching %d PARALLEL subprocesses to export UserMessage rows' % (threads,))
+logging.info('Launching %d PARALLEL subprocesses to export UserMessage rows', threads)
 pids = {}

 for shard_id in range(threads):
@@ -63,7 +63,7 @@ def get_timing(message: str, f: Callable[[], None]) -> None:
 logger.info(message)
 f()
 elapsed = time.time() - start
-logger.info('elapsed time: %.03f\n' % (elapsed,))
+logger.info('elapsed time: %.03f\n', elapsed)


 def fix_unsubscribed(cursor: CursorObj, user_profile: UserProfile) -> None:
@@ -126,7 +126,7 @@ def fix_unsubscribed(cursor: CursorObj, user_profile: UserProfile) -> None:
 rows = cursor.fetchall()
 for row in rows:
 user_message_ids.append(row[0])
-logger.info('rows found: %d' % (len(user_message_ids),))
+logger.info('rows found: %d', len(user_message_ids))

 get_timing(
 'finding unread messages for non-active streams',
@@ -216,7 +216,7 @@ def fix_pre_pointer(cursor: CursorObj, user_profile: UserProfile) -> None:
 for (um_id, recipient_id, topic) in rows:
 if not is_topic_muted(recipient_id, topic):
 user_message_ids.append(um_id)
-logger.info('rows found: %d' % (len(user_message_ids),))
+logger.info('rows found: %d', len(user_message_ids))

 get_timing(
 'finding pre-pointer messages that are not muted',
@@ -235,7 +235,7 @@ def fix_pre_pointer(cursor: CursorObj, user_profile: UserProfile) -> None:
 )

 def fix(user_profile: UserProfile) -> None:
-logger.info('\n---\nFixing %s:' % (user_profile.id,))
+logger.info('\n---\nFixing %s:', user_profile.id)
 with connection.cursor() as cursor:
 fix_unsubscribed(cursor, user_profile)
 fix_pre_pointer(cursor, user_profile)
@@ -72,7 +72,7 @@ def get_language_name(code: str) -> str:
 if code in (lang['code'], lang['locale']):
 return lang['name']
 # Log problem, but still return a name
-logging.error("Unknown language code '%s'" % (code,))
+logging.error("Unknown language code '%s'", code)
 return "Unknown"

 def get_available_language_codes() -> List[str]:
@@ -317,7 +317,7 @@ def fix_message_rendered_content(realm: Realm,
 # * rendering markdown failing with the exception being
 # caught in bugdown (which then returns None, causing the the
 # rendered_content assert above to fire).
-logging.warning("Error in markdown rendering for message ID %s; continuing" % (message['id'],))
+logging.warning("Error in markdown rendering for message ID %s; continuing", message['id'])

 def current_table_ids(data: TableData, table: TableName) -> List[int]:
 """
@@ -428,10 +428,8 @@ def re_map_foreign_keys_internal(data_table: List[Record],
 if old_id in lookup_table:
 new_id = lookup_table[old_id]
 if verbose:
-logging.info('Remapping %s %s from %s to %s' % (table,
-                                                field_name + '_id',
-                                                old_id,
-                                                new_id))
+logging.info('Remapping %s %s from %s to %s',
+             table, field_name + '_id', old_id, new_id)
 else:
 new_id = old_id
 if not id_field:
@@ -479,10 +477,8 @@ def re_map_foreign_keys_many_to_many_internal(table: TableName,
 if old_id in lookup_table:
 new_id = lookup_table[old_id]
 if verbose:
-logging.info('Remapping %s %s from %s to %s' % (table,
-                                                field_name + '_id',
-                                                old_id,
-                                                new_id))
+logging.info('Remapping %s %s from %s to %s',
-             table, field_name + '_id', old_id, new_id)
 else:
 new_id = old_id
 new_id_list.append(new_id)
@@ -566,16 +562,16 @@ def bulk_import_user_message_data(data: TableData, dump_file_id: int) -> None:
        process_batch=process_batch,
    )

-    logging.info("Successfully imported %s from %s[%s]." % (model, table, dump_file_id))
+    logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)

def bulk_import_model(data: TableData, model: Any, dump_file_id: Optional[str]=None) -> None:
    table = get_db_table(model)
    # TODO, deprecate dump_file_id
    model.objects.bulk_create(model(**item) for item in data[table])
    if dump_file_id is None:
-        logging.info("Successfully imported %s from %s." % (model, table))
+        logging.info("Successfully imported %s from %s.", model, table)
    else:
-        logging.info("Successfully imported %s from %s[%s]." % (model, table, dump_file_id))
+        logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)

# Client is a table shared by multiple realms, so in order to
# correctly import multiple realms into the same server, we need to
@@ -627,7 +623,7 @@ def import_uploads(realm: Realm, import_dir: Path, processes: int, processing_av
    for record in records:
        count += 1
        if count % 1000 == 0:
-            logging.info("Processed %s/%s uploads" % (count, len(records)))
+            logging.info("Processed %s/%s uploads", count, len(records))

        if processing_avatars:
            # For avatars, we need to rehash the user ID with the
@@ -677,7 +673,7 @@ def import_uploads(realm: Realm, import_dir: Path, processes: int, processing_av
            user_profile_id = int(record['user_profile_id'])
            # Support email gateway bot and other cross-realm messages
            if user_profile_id in ID_MAP["user_profile"]:
-                logging.info("Uploaded by ID mapped user: %s!" % (user_profile_id,))
+                logging.info("Uploaded by ID mapped user: %s!", user_profile_id)
                user_profile_id = ID_MAP["user_profile"][user_profile_id]
            user_profile = get_user_profile_by_id(user_profile_id)
            key.set_metadata("user_profile_id", str(user_profile.id))
@@ -733,8 +729,10 @@ def import_uploads(realm: Realm, import_dir: Path, processes: int, processing_av
            if record.get("importer_should_thumbnail"):
                upload_backend.ensure_basic_avatar_image(user_profile=user_profile)
        except BadImageError:
-            logging.warning("Could not thumbnail avatar image for user %s; ignoring" % (
-                user_profile.id,))
+            logging.warning(
+                "Could not thumbnail avatar image for user %s; ignoring",
+                user_profile.id,
+            )
            # Delete the record of the avatar to avoid 404s.
            do_change_avatar_fields(user_profile, UserProfile.AVATAR_FROM_GRAVATAR)
    return 0
@@ -767,7 +765,7 @@ def import_uploads(realm: Realm, import_dir: Path, processes: int, processing_av
# faithful, we have to use a set of fixers (e.g. on DateTime objects
# and Foreign Keys) to do the import correctly.
def do_import_realm(import_dir: Path, subdomain: str, processes: int=1) -> Realm:
-    logging.info("Importing realm dump %s" % (import_dir,))
+    logging.info("Importing realm dump %s", import_dir)
    if not os.path.exists(import_dir):
        raise Exception("Missing import directory!")

@@ -778,7 +776,7 @@ def do_import_realm(import_dir: Path, subdomain: str, processes: int=1) -> Realm
    if not server_initialized():
        create_internal_realm()

-    logging.info("Importing realm data from %s" % (realm_data_filename,))
+    logging.info("Importing realm data from %s", realm_data_filename)
    with open(realm_data_filename) as f:
        data = ujson.load(f)
    remove_denormalized_recipient_column_from_data(data)
@@ -833,7 +831,7 @@ def do_import_realm(import_dir: Path, subdomain: str, processes: int=1) -> Realm
    # Remap the user IDs for notification_bot and friends to their
    # appropriate IDs on this server
    for item in data['zerver_userprofile_crossrealm']:
-        logging.info("Adding to ID map: %s %s" % (item['id'], get_system_bot(item['email']).id))
+        logging.info("Adding to ID map: %s %s", item['id'], get_system_bot(item['email']).id)
        new_user_id = get_system_bot(item['email']).id
        update_id_map(table='user_profile', old_id=item['id'], new_id=new_user_id)
        new_recipient_id = Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user_id).id
@@ -1070,7 +1068,7 @@ def do_import_realm(import_dir: Path, subdomain: str, processes: int=1) -> Realm
    if not os.path.exists(fn):
        raise Exception("Missing attachment.json file!")

-    logging.info("Importing attachment data from %s" % (fn,))
+    logging.info("Importing attachment data from %s", fn)
    with open(fn) as f:
        data = ujson.load(f)

@@ -1194,7 +1192,7 @@ def import_message_data(realm: Realm,
        with open(message_filename) as f:
            data = ujson.load(f)

-        logging.info("Importing message dump %s" % (message_filename,))
+        logging.info("Importing message dump %s", message_filename)
        re_map_foreign_keys(data, 'zerver_message', 'sender', related_table="user_profile")
        re_map_foreign_keys(data, 'zerver_message', 'recipient', related_table="recipient")
        re_map_foreign_keys(data, 'zerver_message', 'sending_client', related_table='client')
@@ -1299,14 +1297,14 @@ def import_attachments(data: TableData) -> None:
        tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
        cursor.executemany(sql_template, tups)

-    logging.info('Successfully imported M2M table %s' % (m2m_table_name,))
+    logging.info('Successfully imported M2M table %s', m2m_table_name)

def import_analytics_data(realm: Realm, import_dir: Path) -> None:
    analytics_filename = os.path.join(import_dir, "analytics.json")
    if not os.path.exists(analytics_filename):
        return

-    logging.info("Importing analytics data from %s" % (analytics_filename,))
+    logging.info("Importing analytics data from %s", analytics_filename)
    with open(analytics_filename) as f:
        data = ujson.load(f)
@@ -244,8 +244,10 @@ def request_retry(event: Dict[str, Any],
        bot_user = get_user_profile_by_id(event['user_profile_id'])
        fail_with_message(event, "Bot is unavailable")
        notify_bot_owner(event, request_data, failure_message=failure_message)
-        logging.warning("Maximum retries exceeded for trigger:%s event:%s" % (
-            bot_user.email, event['command']))
+        logging.warning(
+            "Maximum retries exceeded for trigger:%s event:%s",
+            bot_user.email, event['command'],
+        )

    retry_event('outgoing_webhooks', event, failure_processor)
@@ -288,23 +290,25 @@ def do_rest_call(base_url: str,
        else:
            logging.warning("Message %(message_url)s triggered an outgoing webhook, returning status "
                            "code %(status_code)s.\n Content of response (in quotes): \""
-                            "%(response)s\""
-                            % {'message_url': get_message_url(event),
+                            "%(response)s\"",
+                            {'message_url': get_message_url(event),
                               'status_code': response.status_code,
                               'response': response.content})
            failure_message = "Third party responded with %d" % (response.status_code,)
            fail_with_message(event, failure_message)
            notify_bot_owner(event, request_data, response.status_code, response.content)

    except requests.exceptions.Timeout:
-        logging.info("Trigger event %s on %s timed out. Retrying" % (
-            event["command"], event['service_name']))
+        logging.info(
+            "Trigger event %s on %s timed out. Retrying",
+            event["command"], event['service_name'],
+        )
        failure_message = "A timeout occurred."
        request_retry(event, request_data, failure_message=failure_message)

    except requests.exceptions.ConnectionError:
-        logging.info("Trigger event %s on %s resulted in a connection error. Retrying"
-                     % (event["command"], event['service_name']))
+        logging.info("Trigger event %s on %s resulted in a connection error. Retrying",
+                     event["command"], event['service_name'])
        failure_message = "A connection error occurred. Is my bot server down?"
        request_retry(event, request_data, failure_message=failure_message)
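
One detail in the do_rest_call hunk above: when the format string uses %(name)s placeholders, the stdlib logging calls accept a single mapping as the lone argument, so the dict is passed directly instead of being merged with the % operator at the call site. A minimal sketch with hypothetical values:

    import logging

    logging.basicConfig(level=logging.WARNING)

    # A lone dict argument supplies %(name)s-style placeholders; formatting
    # is still deferred until the record is emitted.
    logging.warning(
        "Message %(message_url)s returned status %(status_code)s",
        {"message_url": "https://example.com/msg/1", "status_code": 503},
    )
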
@@ -164,7 +164,7 @@ def send_apple_push_notification(user_id: int, devices: List[DeviceToken],
            logger.info("APNs: Success sending for user %d to device %s",
                        user_id, device.token)
        elif result in ["Unregistered", "BadDeviceToken", "DeviceTokenNotForTopic"]:
-            logger.info("APNs: Removing invalid/expired token %s (%s)" % (device.token, result))
+            logger.info("APNs: Removing invalid/expired token %s (%s)", device.token, result)
            # We remove all entries for this token (There
            # could be multiple for different Zulip servers).
            DeviceTokenClass.objects.filter(token=device.token, kind=DeviceTokenClass.APNS).delete()
@@ -280,7 +280,7 @@ def send_android_push_notification(devices: List[DeviceToken], data: Dict[str, A

    if res and 'success' in res:
        for reg_id, msg_id in res['success'].items():
-            logger.info("GCM: Sent %s as %s" % (reg_id, msg_id))
+            logger.info("GCM: Sent %s as %s", reg_id, msg_id)

    if remote:
        DeviceTokenClass = RemotePushDeviceToken
@@ -294,7 +294,7 @@ def send_android_push_notification(devices: List[DeviceToken], data: Dict[str, A
        for reg_id, new_reg_id in res['canonical'].items():
            if reg_id == new_reg_id:
                # I'm not sure if this should happen. In any case, not really actionable.
-                logger.warning("GCM: Got canonical ref but it already matches our ID %s!" % (reg_id,))
+                logger.warning("GCM: Got canonical ref but it already matches our ID %s!", reg_id)
            elif not DeviceTokenClass.objects.filter(token=new_reg_id,
                                                     kind=DeviceTokenClass.GCM).count():
                # This case shouldn't happen; any time we get a canonical ref it should have been
@@ -302,13 +302,13 @@ def send_android_push_notification(devices: List[DeviceToken], data: Dict[str, A
                #
                # That said, recovery is easy: just update the current PDT object to use the new ID.
                logger.warning(
-                    "GCM: Got canonical ref %s replacing %s but new ID not registered! Updating." %
-                    (new_reg_id, reg_id))
+                    "GCM: Got canonical ref %s replacing %s but new ID not registered! Updating.",
+                    new_reg_id, reg_id)
                DeviceTokenClass.objects.filter(
                    token=reg_id, kind=DeviceTokenClass.GCM).update(token=new_reg_id)
            else:
                # Since we know the new ID is registered in our system we can just drop the old one.
-                logger.info("GCM: Got canonical ref %s, dropping %s" % (new_reg_id, reg_id))
+                logger.info("GCM: Got canonical ref %s, dropping %s", new_reg_id, reg_id)

                DeviceTokenClass.objects.filter(token=reg_id, kind=DeviceTokenClass.GCM).delete()
@@ -316,13 +316,13 @@ def send_android_push_notification(devices: List[DeviceToken], data: Dict[str, A
        for error, reg_ids in res['errors'].items():
            if error in ['NotRegistered', 'InvalidRegistration']:
                for reg_id in reg_ids:
-                    logger.info("GCM: Removing %s" % (reg_id,))
+                    logger.info("GCM: Removing %s", reg_id)
                    # We remove all entries for this token (There
                    # could be multiple for different Zulip servers).
                    DeviceTokenClass.objects.filter(token=reg_id, kind=DeviceTokenClass.GCM).delete()
            else:
                for reg_id in reg_ids:
-                    logger.warning("GCM: Delivery to %s failed: %s" % (reg_id, error))
+                    logger.warning("GCM: Delivery to %s failed: %s", reg_id, error)

    # python-gcm handles retrying of the unsent messages.
    # Ref: https://github.com/geeknam/python-gcm/blob/master/gcm/gcm.py#L497
@@ -707,8 +707,10 @@ def handle_push_notification(user_profile_id: int, missed_message: Dict[str, Any
            # If the cause is a race with the message being deleted,
            # that's normal and we have no need to log an error.
            return
-        logging.error("Unexpected message access failure handling push notifications: %s %s" % (
-            user_profile.id, missed_message['message_id']))
+        logging.error(
+            "Unexpected message access failure handling push notifications: %s %s",
+            user_profile.id, missed_message['message_id'],
+        )
        return

    if user_message is not None:
@@ -730,15 +732,17 @@ def handle_push_notification(user_profile_id: int, missed_message: Dict[str, Any
        # queue for messages they haven't received if they're
        # long-term idle; anything else is likely a bug.
        if not user_profile.long_term_idle:
-            logger.error("Could not find UserMessage with message_id %s and user_id %s" % (
-                missed_message['message_id'], user_profile_id))
+            logger.error(
+                "Could not find UserMessage with message_id %s and user_id %s",
+                missed_message['message_id'], user_profile_id,
+            )
            return

    message.trigger = missed_message['trigger']

    apns_payload = get_message_payload_apns(user_profile, message)
    gcm_payload, gcm_options = get_message_payload_gcm(user_profile, message)
-    logger.info("Sending push notifications to mobile clients for user %s" % (user_profile_id,))
+    logger.info("Sending push notifications to mobile clients for user %s", user_profile_id)

    if uses_notification_bouncer():
        send_notifications_to_bouncer(user_profile_id,
@@ -195,7 +195,7 @@ class ExceptionFreeTornadoConnection(pika.adapters.tornado_connection.TornadoCon
                pika.exceptions.ProbableAccessDeniedError,
                pika.exceptions.IncompatibleProtocolError) as e:
            logging.warning("Caught exception '%r' in ExceptionFreeTornadoConnection when \
-calling _adapter_disconnect, ignoring" % (e,))
+calling _adapter_disconnect, ignoring", e)


class TornadoQueueClient(SimpleQueueClient):
@@ -455,7 +455,7 @@ class RedisRateLimiterBackend(RateLimiterBackend):
        try:
            cls.incr_ratelimit(entity_key, max_api_calls, max_api_window)
        except RateLimiterLockingException:
-            logger.warning("Deadlock trying to incr_ratelimit for %s" % (entity_key,))
+            logger.warning("Deadlock trying to incr_ratelimit for %s", entity_key)
            # rate-limit users who are hitting the API so hard we can't update our stats.
            ratelimited = True
@@ -89,7 +89,7 @@ def run_archiving_in_chunks(query: str, type: int, realm: Optional[Realm]=None,
    while True:
        with transaction.atomic():
            archive_transaction = ArchiveTransaction.objects.create(type=type, realm=realm)
-            logger.info("Archiving in {}".format(archive_transaction))
+            logger.info("Archiving in %s", archive_transaction)
            new_chunk = move_rows(Message, query, chunk_size=chunk_size, returning_id=True,
                                  archive_transaction_id=archive_transaction.id, **kwargs)
            if new_chunk:
@@ -102,7 +102,7 @@ def run_archiving_in_chunks(query: str, type: int, realm: Optional[Realm]=None,
        # This line needs to be outside of the atomic block, to capture the actual moment
        # archiving of the chunk is finished (since Django does some significant additional work
        # when leaving the block).
-        logger.info("Finished. Archived {} messages in this transaction.".format(len(new_chunk)))
+        logger.info("Finished. Archived %s messages in this transaction.", len(new_chunk))

        # We run the loop, until the query returns fewer results than chunk_size,
        # which means we are done:
@@ -269,7 +269,7 @@ def archive_messages_by_recipient(recipient: Recipient, message_retention_days:
def archive_personal_and_huddle_messages(realm: Realm, chunk_size: int=MESSAGE_BATCH_SIZE) -> None:
    logger.info("Archiving personal and huddle messages for realm " + realm.string_id)
    message_count = move_expired_personal_and_huddle_messages_to_archive(realm, chunk_size)
-    logger.info("Done. Archived {} messages".format(message_count))
+    logger.info("Done. Archived %s messages", message_count)

def archive_stream_messages(realm: Realm, chunk_size: int=MESSAGE_BATCH_SIZE) -> None:
    logger.info("Archiving stream messages for realm " + realm.string_id)
@@ -296,10 +296,10 @@ def archive_stream_messages(realm: Realm, chunk_size: int=MESSAGE_BATCH_SIZE) ->
            recipient, retention_policy_dict[recipient.type_id], realm, chunk_size
        )

-    logger.info("Done. Archived {} messages.".format(message_count))
+    logger.info("Done. Archived %s messages.", message_count)

def archive_messages(chunk_size: int=MESSAGE_BATCH_SIZE) -> None:
-    logger.info("Starting the archiving process with chunk_size {}".format(chunk_size))
+    logger.info("Starting the archiving process with chunk_size %s", chunk_size)

    # We exclude SYSTEM_BOT_REALM here because the logic for archiving
    # private messages and huddles isn't designed to correctly handle
@@ -393,7 +393,7 @@ def restore_attachment_messages_from_archive(archive_transaction_id: int) -> Non
        cursor.execute(query.format(archive_transaction_id=archive_transaction_id))

def restore_data_from_archive(archive_transaction: ArchiveTransaction) -> int:
-    logger.info("Restoring {}".format(archive_transaction))
+    logger.info("Restoring %s", archive_transaction)
    # transaction.atomic needs to be used here, rather than being a wrapper on the whole function,
    # so that when we log "Finished", the process has indeed finished - and that happens only after
    # leaving the atomic block - Django does work committing the changes to the database when
@@ -407,7 +407,7 @@ def restore_data_from_archive(archive_transaction: ArchiveTransaction) -> int:
        archive_transaction.restored = True
        archive_transaction.save()

-    logger.info("Finished. Restored {} messages".format(len(msg_ids)))
+    logger.info("Finished. Restored %s messages", len(msg_ids))
    return len(msg_ids)

def restore_data_from_archive_by_transactions(archive_transactions: List[ArchiveTransaction]) -> int:
@@ -422,10 +422,10 @@ def restore_data_from_archive_by_transactions(archive_transactions: List[Archive
def restore_data_from_archive_by_realm(realm: Realm) -> None:
    transactions = ArchiveTransaction.objects.exclude(restored=True).filter(
        realm=realm, type=ArchiveTransaction.RETENTION_POLICY_BASED)
-    logger.info("Restoring {} transactions from realm {}".format(len(transactions), realm.string_id))
+    logger.info("Restoring %s transactions from realm %s", len(transactions), realm.string_id)
    message_count = restore_data_from_archive_by_transactions(transactions)

-    logger.info("Finished. Restored {} messages from realm {}".format(message_count, realm.string_id))
+    logger.info("Finished. Restored %s messages from realm %s", message_count, realm.string_id)

def restore_all_data_from_archive(restore_manual_transactions: bool=True) -> None:
    for realm in Realm.objects.all():
@@ -444,4 +444,4 @@ def clean_archived_data() -> None:
    count = transactions.count()
    transactions.delete()

-    logger.info("Deleted {} old ArchiveTransactions.".format(count))
+    logger.info("Deleted %s old ArchiveTransactions.", count)
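
The retention hunks above also swap "{}".format(...) for %s placeholders. That is not just style: per-call arguments are always merged with the % operator by LogRecord.getMessage(), independent of the Formatter style, so lazy arguments must use %-style placeholders. A small sketch of that mechanism with hypothetical record values:

    import logging

    record = logging.LogRecord(
        name="example", level=logging.INFO, pathname="example.py", lineno=1,
        msg="Done. Archived %s messages", args=(42,), exc_info=None,
    )
    # getMessage() applies the % operator to msg and args, which is why
    # deferred logging arguments use %-style placeholders, not str.format.
    print(record.getMessage())  # -> "Done. Archived 42 messages"
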
@@ -100,7 +100,7 @@ def build_email(template_prefix: str, to_user_ids: Optional[List[int]]=None,
        (html_message, message, email_subject) = render_templates()
    else:
        (html_message, message, email_subject) = render_templates()
-        logger.warning("Missing language for email template '{}'".format(template_prefix))
+        logger.warning("Missing language for email template '%s'", template_prefix)

    if from_name is None:
        from_name = "Zulip"
@@ -153,10 +153,10 @@ def send_email(template_prefix: str, to_user_ids: Optional[List[int]]=None,
                       from_name=from_name, from_address=from_address,
                       reply_to_email=reply_to_email, language=language, context=context)
    template = template_prefix.split("/")[-1]
-    logger.info("Sending %s email to %s" % (template, mail.to))
+    logger.info("Sending %s email to %s", template, mail.to)

    if mail.send() == 0:
-        logger.error("Error sending %s email to %s" % (template, mail.to))
+        logger.error("Error sending %s email to %s", template, mail.to)
        raise EmailNotDeliveredException

def send_email_from_dict(email_dict: Mapping[str, Any]) -> None:
@@ -53,7 +53,7 @@ def delete_all_deactivated_user_sessions() -> None:
            continue
        user_profile = get_user_profile_by_id(user_profile_id)
        if not user_profile.is_active or user_profile.realm.deactivated:
-            logging.info("Deactivating session for deactivated user %s" % (user_profile.id,))
+            logging.info("Deactivating session for deactivated user %s", user_profile.id)
            delete_session(session)

def set_expirable_session_var(session: Session, var_name: str, var_value: Any, expiry_seconds: int) -> None:
@@ -68,7 +68,7 @@ def get_expirable_session_var(session: Session, var_name: str, default_value: An
    try:
        value, expire_at = (session[var_name]['value'], session[var_name]['expire_at'])
    except (KeyError, TypeError) as e:
-        logging.warning("get_expirable_session_var: Variable {}: {}".format(var_name, e))
+        logging.warning("get_expirable_session_var: Variable %s: %s", var_name, e)
        return default_value

    if timestamp_to_datetime(expire_at) < timezone_now():
@@ -210,7 +210,7 @@ def do_soft_deactivate_user(user_profile: UserProfile) -> None:
    user_profile.save(update_fields=[
        'long_term_idle',
        'last_active_message_id'])
-    logger.info('Soft Deactivated user %s' % (user_profile.id,))
+    logger.info('Soft Deactivated user %s', user_profile.id)

def do_soft_deactivate_users(users: List[UserProfile]) -> List[UserProfile]:
    BATCH_SIZE = 100
@@ -234,8 +234,8 @@ def do_soft_deactivate_users(users: List[UserProfile]) -> List[UserProfile]:
            users_soft_deactivated.append(user)
        RealmAuditLog.objects.bulk_create(realm_logs)

-        logging.info("Soft-deactivated batch of %s users; %s remain to process" %
-                     (len(user_batch), len(users)))
+        logging.info("Soft-deactivated batch of %s users; %s remain to process",
+                     len(user_batch), len(users))

    return users_soft_deactivated

@@ -267,7 +267,7 @@ def reactivate_user_if_soft_deactivated(user_profile: UserProfile) -> Union[User
            event_type=RealmAuditLog.USER_SOFT_ACTIVATED,
            event_time=timezone_now()
        )
-        logger.info('Soft Reactivated user %s' % (user_profile.id,))
+        logger.info('Soft Reactivated user %s', user_profile.id)
        return user_profile
    return None

@@ -301,7 +301,7 @@ def do_catch_up_soft_deactivated_users(users: List[UserProfile]) -> List[UserPro
        if user_profile.long_term_idle:
            add_missing_messages(user_profile)
            users_caught_up.append(user_profile)
-    logger.info("Caught up %d soft-deactivated users" % (len(users_caught_up),))
+    logger.info("Caught up %d soft-deactivated users", len(users_caught_up))
    return users_caught_up

def get_soft_deactivated_users_for_catch_up(filter_kwargs: Any) -> List[UserProfile]:
@@ -25,7 +25,7 @@ def transfer_avatars_to_s3(processes: int) -> None:
        try:
            with open(file_path, 'rb') as f:
                s3backend.upload_avatar_image(f, user, user)
-                logging.info("Uploaded avatar for {} in realm {}".format(user.id, user.realm.name))
+                logging.info("Uploaded avatar for %s in realm %s", user.id, user.realm.name)
        except FileNotFoundError:
            pass
        return 0
@@ -48,7 +48,7 @@ def transfer_message_files_to_s3(processes: int) -> None:
                bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
                guessed_type = guess_type(attachment.file_name)[0]
                upload_image_to_s3(bucket_name, attachment.path_id, guessed_type, attachment.owner, f.read())
-                logging.info("Uploaded message file in path {}".format(file_path))
+                logging.info("Uploaded message file in path %s", file_path)
        except FileNotFoundError:  # nocoverage
            pass
        return 0
@@ -75,7 +75,7 @@ def transfer_emoji_to_s3(processes: int) -> None:
        try:
            with open(emoji_path, 'rb') as f:
                s3backend.upload_emoji_image(f, realm_emoji.file_name, realm_emoji.author)
-                logging.info("Uploaded emoji file in path {}".format(emoji_path))
+                logging.info("Uploaded emoji file in path %s", emoji_path)
        except FileNotFoundError:  # nocoverage
            pass
        return 0
@@ -357,7 +357,7 @@ class S3UploadBackend(ZulipUploadBackend):
            return True

        file_name = path_id.split("/")[-1]
-        logging.warning("%s does not exist. Its entry in the database will be removed." % (file_name,))
+        logging.warning("%s does not exist. Its entry in the database will be removed.", file_name)
        return False

    def upload_message_file(self, uploaded_file_name: str, uploaded_file_size: int,
@@ -655,7 +655,7 @@ def delete_local_file(type: str, path: str) -> bool:
        os.remove(file_path)
        return True
    file_name = path.split("/")[-1]
-    logging.warning("%s does not exist. Its entry in the database will be removed." % (file_name,))
+    logging.warning("%s does not exist. Its entry in the database will be removed.", file_name)
    return False

def get_local_file_path(path_id: str) -> Optional[str]:
@@ -138,9 +138,11 @@ class AdminNotifyHandler(logging.Handler):

        if settings.DEBUG_ERROR_REPORTING:  # nocoverage
            logging.warning("Reporting an error to admins...")
-            logging.warning("Reporting an error to admins: {} {} {} {} {}" .format(
+            logging.warning(
+                "Reporting an error to admins: %s %s %s %s %s",
                record.levelname, report['logger_name'], report['log_module'],
-                report['message'], report['stack_trace']))
+                report['message'], report['stack_trace'],
+            )

        try:
            if settings.STAGING_ERROR_NOTIFICATIONS:
@@ -32,12 +32,12 @@ class Command(BaseCommand):

            age = int(client.ttl(key))
            if age < 0:
-                logging.error("Found key with age of %s, will never expire: %s" % (age, key,))
+                logging.error("Found key with age of %s, will never expire: %s", age, key)

            count = count_func()
            if count > max_calls:
                logging.error("Redis health check found key with more elements \
-than max_api_calls! (trying to trim) %s %s" % (key, count))
+than max_api_calls! (trying to trim) %s %s", key, count)
                if trim_func is not None:
                    client.expire(key, entity.max_api_window())
                    trim_func(key, max_calls)
@@ -46,7 +46,7 @@ Usage: ./manage.py deliver_email
            try:
                deliver_email(job)
            except EmailNotDeliveredException:
-                logger.warning("%r not delivered" % (job,))
+                logger.warning("%r not delivered", job)
            time.sleep(10)
        else:
            # Less load on the db during times of activity,
@@ -32,7 +32,7 @@ class Command(BaseCommand):
                            help='ID of the message advertising users to react with thumbs up')

    def handle(self, *args: Any, **options: Any) -> None:
-        logging.info("Starting UserMessage batch thread %s" % (options['thread'],))
+        logging.info("Starting UserMessage batch thread %s", options['thread'])
        files = set(glob.glob(os.path.join(options['path'], 'messages-*.json.partial')))
        for partial_path in files:
            locked_path = partial_path.replace(".json.partial", ".json.locked")
@@ -42,7 +42,7 @@ class Command(BaseCommand):
            except Exception:
                # Already claimed by another process
                continue
-            logging.info("Thread %s processing %s" % (options['thread'], output_path))
+            logging.info("Thread %s processing %s", options['thread'], output_path)
            try:
                export_usermessages_batch(locked_path, output_path, options["consent_message_id"])
            except Exception:
@@ -57,7 +57,7 @@ class Command(BaseCommand):
                td = Threaded_worker(queue_name)
                td.start()
            assert len(queues) == cnt
-            logger.info('%d queue worker threads were launched' % (cnt,))
+            logger.info('%d queue worker threads were launched', cnt)

        if options['all']:
            signal.signal(signal.SIGUSR1, exit_with_three)
@@ -70,12 +70,12 @@ class Command(BaseCommand):
            queue_name = options['queue_name']
            worker_num = options['worker_num']

-            logger.info("Worker %d connecting to queue %s" % (worker_num, queue_name))
+            logger.info("Worker %d connecting to queue %s", worker_num, queue_name)
            worker = get_worker(queue_name)
            worker.setup()

            def signal_handler(signal: int, frame: FrameType) -> None:
-                logger.info("Worker %d disconnecting from queue %s" % (worker_num, queue_name))
+                logger.info("Worker %d disconnecting from queue %s", worker_num, queue_name)
                worker.stop()
                sys.exit(0)
            signal.signal(signal.SIGTERM, signal_handler)
@@ -74,7 +74,7 @@ class Command(ZulipBaseCommand):
            msgs.update(flags=models.F('flags').bitand(~flag))

        if not options["for_real"]:
-            logging.info("Updating %s by %s %s" % (mids, op, flag))
+            logging.info("Updating %s by %s %s", mids, op, flag)
            logging.info("Dry run completed. Run with --for-real to change message flags.")
            raise CommandError
@@ -26,7 +26,7 @@ def sync_ldap_user_data(user_profiles: List[UserProfile], deactivation_protectio
        try:
            sync_user_from_ldap(u, logger)
        except ZulipLDAPException as e:
-            logger.error("Error attempting to update user %s:" % (u.delivery_email,))
+            logger.error("Error attempting to update user %s:", u.delivery_email)
            logger.error(e)

    if deactivation_protection:
@@ -230,7 +230,7 @@ def write_log_line(log_data: MutableMapping[str, Any], path: str, method: str, r
            error_data = repr(b''.join(error_content_list))
        if len(error_data) > 200:
            error_data = "[content more than 200 characters]"
-        logger.info('status=%3d, data=%s, uid=%s' % (status_code, error_data, requestor_for_logs))
+        logger.info('status=%3d, data=%s, uid=%s', status_code, error_data, requestor_for_logs)

class LogRequests(MiddlewareMixin):
    # We primarily are doing logging using the process_view hook, but
@@ -1554,13 +1554,13 @@ class SAMLAuthBackendTest(SocialAuthBase):
            result = self.client_get('/login/saml/')
            self.assertEqual(result.status_code, 302)
            self.assertEqual('/login/', result.url)
-            m.assert_called_with("/login/saml/ : Bad idp param: KeyError: 'idp'.")
+            m.assert_called_with("/login/saml/ : Bad idp param: KeyError: %s.", "'idp'")

        with mock.patch('zproject.backends.logging.info') as m:
            result = self.client_get('/login/saml/?idp=bad_idp')
            self.assertEqual(result.status_code, 302)
            self.assertEqual('/login/', result.url)
-            m.assert_called_with("/login/saml/ : Bad idp param: KeyError: 'bad_idp'.")
+            m.assert_called_with("/login/saml/ : Bad idp param: KeyError: %s.", "'bad_idp'")

    def test_social_auth_invalid_email(self) -> None:
        """
@@ -1635,7 +1635,8 @@ class SAMLAuthBackendTest(SocialAuthBase):
            self.assertEqual(result.status_code, 302)
            self.assertEqual('/login/', result.url)
            m.assert_called_with(
-                "User authenticated with IdP test_idp but this provider is not enabled for this realm zephyr."
+                "User authenticated with IdP %s but this provider is not enabled for this realm %s.",
+                "test_idp", "zephyr",
            )

    def test_social_auth_saml_login_bad_idp_arg(self) -> None:
@@ -1739,8 +1740,10 @@ class GitHubAuthBackendTest(SocialAuthBase):
                                            email_data=email_data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "/login/")
-        mock_warning.assert_called_once_with("Social auth (GitHub) failed "
-                                             "because user has no verified emails")
+        mock_warning.assert_called_once_with(
+            "Social auth (%s) failed because user has no verified emails",
+            "GitHub",
+        )

    @override_settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp')
    def test_social_auth_github_team_not_member_failed(self) -> None:
@@ -2016,8 +2019,11 @@ class GitHubAuthBackendTest(SocialAuthBase):
                                            email_data=email_data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "/login/")
-        mock_warning.assert_called_once_with("Social auth (GitHub) failed because user has no verified"
-                                             " emails associated with the account")
+        mock_warning.assert_called_once_with(
+            "Social auth (%s) failed because user has no verified"
+            " emails associated with the account",
+            "GitHub",
+        )

    def test_github_oauth2_email_not_associated(self) -> None:
        account_data_dict = dict(email='not-associated@zulip.com', name=self.name)
@@ -2037,8 +2043,11 @@ class GitHubAuthBackendTest(SocialAuthBase):
                                            email_data=email_data)
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "/login/")
-        mock_warning.assert_called_once_with("Social auth (GitHub) failed because user has no verified"
-                                             " emails associated with the account")
+        mock_warning.assert_called_once_with(
+            "Social auth (%s) failed because user has no verified"
+            " emails associated with the account",
+            "GitHub",
+        )

class GitLabAuthBackendTest(SocialAuthBase):
    __unittest_skip__ = False
@@ -2085,8 +2094,10 @@ class GoogleAuthBackendTest(SocialAuthBase):
                                            subdomain='zulip')
        self.assertEqual(result.status_code, 302)
        self.assertEqual(result.url, "/login/")
-        mock_warning.assert_called_once_with("Social auth (Google) failed "
-                                             "because user has no verified emails")
+        mock_warning.assert_called_once_with(
+            "Social auth (%s) failed because user has no verified emails",
+            "Google",
+        )

    def test_social_auth_mobile_success_legacy_url(self) -> None:
        mobile_flow_otp = '1234abcd' * 8
@@ -2171,7 +2182,7 @@ class GoogleAuthBackendTest(SocialAuthBase):
                'redirect_to': ''}  # type: ExternalAuthDataDict
        with mock.patch("logging.warning") as mock_warn:
            result = self.get_log_into_subdomain(data, force_token='nonsense')
-            mock_warn.assert_called_once_with("log_into_subdomain: Malformed token given: nonsense")
+            mock_warn.assert_called_once_with("log_into_subdomain: Malformed token given: %s", "nonsense")
        self.assertEqual(result.status_code, 400)

    def test_log_into_subdomain_when_token_not_found(self) -> None:
@@ -2183,7 +2194,7 @@ class GoogleAuthBackendTest(SocialAuthBase):
        with mock.patch("logging.warning") as mock_warn:
            token = generate_random_token(ExternalAuthResult.LOGIN_TOKEN_LENGTH)
            result = self.get_log_into_subdomain(data, force_token=token)
-            mock_warn.assert_called_once_with("log_into_subdomain: Invalid token given: %s" % (token,))
+            mock_warn.assert_called_once_with("log_into_subdomain: Invalid token given: %s", token)
        self.assertEqual(result.status_code, 400)
        self.assert_in_response("Invalid or expired login session.", result)

@@ -3188,7 +3199,7 @@ class DjangoToLDAPUsernameTests(ZulipTestCase):
        with mock.patch("zproject.backends.logging.warning") as mock_warn:
            with self.assertRaises(ZulipLDAPExceptionNoMatchingLDAPUser):
                self.backend.django_to_ldap_username("shared_email@zulip.com")
-            mock_warn.assert_called_with("Multiple users with email shared_email@zulip.com found in LDAP.")
+            mock_warn.assert_called_with("Multiple users with email %s found in LDAP.", "shared_email@zulip.com")

        # Test on a weird case of a user whose uid is an email and his actual "mail"
        # attribute is a different email address:
@@ -3740,7 +3751,7 @@ class TestZulipLDAPUserPopulator(ZulipLDAPTestCase):
        mock_logger = mock.MagicMock()
        result = sync_user_from_ldap(othello, mock_logger)
        mock_logger.warning.assert_called_once_with(
-            "Did not find %s in LDAP." % (othello.delivery_email,))
+            "Did not find %s in LDAP.", othello.delivery_email)
        self.assertFalse(result)

        do_deactivate_user(othello)
@@ -3810,7 +3821,8 @@ class TestZulipLDAPUserPopulator(ZulipLDAPTestCase):
        with mock.patch('logging.warning') as mock_warning:
            self.perform_ldap_sync(self.example_user('hamlet'))
            mock_warning.assert_called_once_with(
-                'Could not parse jpegPhoto field for user %s' % (hamlet.id,))
+                'Could not parse %s field for user %s', 'jpegPhoto', hamlet.id,
+            )

    def test_deactivate_non_matching_users(self) -> None:
        with self.settings(LDAP_APPEND_DOMAIN='zulip.com',
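
A knock-on effect visible in the test hunks above: once a call site passes lazy arguments, assertions against the patched logging function must list the format string and each argument separately, because that is the exact call the mock records. A minimal sketch, using hypothetical test code rather than anything from the Zulip suite:

    import logging
    from unittest import mock

    def warn_about(provider: str) -> None:
        logging.warning("Social auth (%s) failed", provider)

    with mock.patch("logging.warning") as mock_warning:
        warn_about("GitHub")
        # The mock records the format string and the argument as separate
        # positional arguments, so the assertion mirrors that shape.
        mock_warning.assert_called_once_with("Social auth (%s) failed", "GitHub")
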
@@ -199,16 +199,18 @@ class SafeCacheFunctionsTest(ZulipTestCase):
with patch('zerver.lib.cache.logger.warning') as mock_warn:
safe_cache_set_many(items)
mock_warn.assert_called_once()
- warning_string = mock_warn.call_args[0][0]
- self.assertIn("badkey1", warning_string)
- self.assertIn("badkey2", warning_string)
+ self.assertEqual(
+ mock_warn.call_args[0][1],
+ ['SafeFunctionsTest:\nbadkey1', 'SafeFunctionsTest:\nbadkey2'],
+ )

with patch('zerver.lib.cache.logger.warning') as mock_warn:
result = safe_cache_get_many(list(items.keys()))
mock_warn.assert_called_once()
- warning_string = mock_warn.call_args[0][0]
- self.assertIn("badkey1", warning_string)
- self.assertIn("badkey2", warning_string)
+ self.assertEqual(
+ mock_warn.call_args[0][1],
+ ['SafeFunctionsTest:\nbadkey1', 'SafeFunctionsTest:\nbadkey2'],
+ )

self.assertEqual(result, {})

@@ -220,16 +222,18 @@ class SafeCacheFunctionsTest(ZulipTestCase):
with patch('zerver.lib.cache.logger.warning') as mock_warn:
safe_cache_set_many(items)
mock_warn.assert_called_once()
- warning_string = mock_warn.call_args[0][0]
- self.assertIn("badkey1", warning_string)
- self.assertIn("badkey2", warning_string)
+ self.assertEqual(
+ mock_warn.call_args[0][1],
+ ['SafeFunctionsTest:\nbadkey1', 'SafeFunctionsTest:\nbadkey2'],
+ )

with patch('zerver.lib.cache.logger.warning') as mock_warn:
result = safe_cache_get_many(list(items.keys()))
mock_warn.assert_called_once()
- warning_string = mock_warn.call_args[0][0]
- self.assertIn("badkey1", warning_string)
- self.assertIn("badkey2", warning_string)
+ self.assertEqual(
+ mock_warn.call_args[0][1],
+ ['SafeFunctionsTest:\nbadkey1', 'SafeFunctionsTest:\nbadkey2'],
+ )

self.assertEqual(result, good_items)
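The rewritten cache tests above stop substring-matching one pre-formatted warning string and instead compare the positional arguments of the logging call directly. A minimal, self-contained sketch of how unittest.mock exposes those arguments (the message and key names here are illustrative, not taken from the patch):

import logging
from unittest import mock

with mock.patch('logging.warning') as mock_warn:
    logging.warning("Failed to store keys: %s", ['badkey1', 'badkey2'])

# call_args[0] is the tuple of positional arguments: index 0 is the
# format string, index 1 is the first value passed for interpolation.
assert mock_warn.call_args[0][0] == "Failed to store keys: %s"
assert mock_warn.call_args[0][1] == ['badkey1', 'badkey2']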
@@ -276,8 +276,9 @@ class DecoratorTestCase(TestCase):
api_result = my_webhook(request) # type: ignore[call-arg] # mypy doesn't seem to apply the decorator

mock_warning.assert_called_with(
- "User {} ({}) attempted to access API on wrong "
- "subdomain ({})".format(webhook_bot_email, 'zulip', ''))
+ "User %s (%s) attempted to access API on wrong subdomain (%s)",
+ webhook_bot_email, 'zulip', '',
+ )

with mock.patch('logging.warning') as mock_warning:
with self.assertRaisesRegex(JsonableError,
@@ -286,8 +287,9 @@ class DecoratorTestCase(TestCase):
api_result = my_webhook(request) # type: ignore[call-arg] # mypy doesn't seem to apply the decorator

mock_warning.assert_called_with(
- "User {} ({}) attempted to access API on wrong "
- "subdomain ({})".format(webhook_bot_email, 'zulip', 'acme'))
+ "User %s (%s) attempted to access API on wrong subdomain (%s)",
+ webhook_bot_email, 'zulip', 'acme',
+ )

request.host = "zulip.testserver"
# Test when content_type is application/json and request.body
@@ -1321,8 +1323,9 @@ class TestValidateApiKey(ZulipTestCase):
self.default_bot.email, api_key)

mock_warning.assert_called_with(
- "User {} ({}) attempted to access API on wrong "
- "subdomain ({})".format(self.default_bot.email, 'zulip', ''))
+ "User %s (%s) attempted to access API on wrong subdomain (%s)",
+ self.default_bot.email, 'zulip', '',
+ )

with mock.patch('logging.warning') as mock_warning:
with self.assertRaisesRegex(JsonableError,
@@ -1331,8 +1334,9 @@ class TestValidateApiKey(ZulipTestCase):
self.default_bot.email, api_key)

mock_warning.assert_called_with(
- "User {} ({}) attempted to access API on wrong "
- "subdomain ({})".format(self.default_bot.email, 'zulip', 'acme'))
+ "User %s (%s) attempted to access API on wrong subdomain (%s)",
+ self.default_bot.email, 'zulip', 'acme',
+ )

def _change_is_active_field(self, profile: UserProfile, value: bool) -> None:
profile.is_active = value
@@ -1465,8 +1469,9 @@ class TestAuthenticatedJsonPostViewDecorator(ZulipTestCase):
"Account is not associated with this "
"subdomain")
mock_warning.assert_called_with(
- "User {} ({}) attempted to access API on wrong "
- "subdomain ({})".format(email, 'zulip', ''))
+ "User %s (%s) attempted to access API on wrong subdomain (%s)",
+ email, 'zulip', '',
+ )

with mock.patch('logging.warning') as mock_warning, \
mock.patch('zerver.decorator.get_subdomain', return_value='acme'):
@@ -1474,8 +1479,9 @@ class TestAuthenticatedJsonPostViewDecorator(ZulipTestCase):
"Account is not associated with this "
"subdomain")
mock_warning.assert_called_with(
- "User {} ({}) attempted to access API on wrong "
- "subdomain ({})".format(email, 'zulip', 'acme'))
+ "User %s (%s) attempted to access API on wrong subdomain (%s)",
+ email, 'zulip', 'acme',
+ )

def test_authenticated_json_post_view_if_user_is_incoming_webhook(self) -> None:
bot = self.example_user('webhook_bot')
@@ -1520,8 +1526,9 @@ class TestAuthenticatedJsonViewDecorator(ZulipTestCase):
"Account is not associated with this "
"subdomain")
mock_warning.assert_called_with(
- "User {} ({}) attempted to access API on wrong "
- "subdomain ({})".format(email, 'zulip', ''))
+ "User %s (%s) attempted to access API on wrong subdomain (%s)",
+ email, 'zulip', '',
+ )

with mock.patch('logging.warning') as mock_warning, \
mock.patch('zerver.decorator.get_subdomain', return_value='acme'):
@@ -1529,8 +1536,9 @@ class TestAuthenticatedJsonViewDecorator(ZulipTestCase):
"Account is not associated with this "
"subdomain")
mock_warning.assert_called_with(
- "User {} ({}) attempted to access API on wrong "
- "subdomain ({})".format(email, 'zulip', 'acme'))
+ "User %s (%s) attempted to access API on wrong subdomain (%s)",
+ email, 'zulip', 'acme',
+ )

def _do_test(self, user_email: str) -> HttpResponse:
data = {"password": initial_password(user_email)}
@@ -556,8 +556,10 @@ class TestEmailMirrorMessagesWithAttachments(ZulipTestCase):

with mock.patch('zerver.lib.email_mirror.logger.warning') as mock_warn:
process_message(incoming_valid_message)
- mock_warn.assert_called_with("Payload is not bytes (invalid attachment %s in message from %s)." %
- ('some_attachment', self.example_email('hamlet')))
+ mock_warn.assert_called_with(
+ "Payload is not bytes (invalid attachment %s in message from %s).",
+ 'some_attachment', self.example_email('hamlet'),
+ )

def test_receive_plaintext_and_html_prefer_text_html_options(self) -> None:
user_profile = self.example_user('hamlet')
@@ -86,4 +86,6 @@ class TestEmbeddedBotFailures(ZulipTestCase):
content="@**{}** foo".format(bot_profile.full_name),
topic_name="bar")
logging_error_mock.assert_called_once_with(
- "Error: User {} has bot with invalid embedded bot service invalid".format(bot_profile.id))
+ "Error: User %s has bot with invalid embedded bot service %s",
+ bot_profile.id, "invalid",
+ )
@@ -120,5 +120,7 @@ class RateLimitTests(ZulipTestCase):
side_effect=RateLimiterLockingException):
result = self.send_api_message(user, "some stuff")
self.assertEqual(result.status_code, 429)
- mock_warn.assert_called_with("Deadlock trying to incr_ratelimit for RateLimitedUser:%s:api_by_user"
- % (user.id,))
+ mock_warn.assert_called_with(
+ "Deadlock trying to incr_ratelimit for %s",
+ "RateLimitedUser:%s:api_by_user" % (user.id,),
+ )
@@ -460,7 +460,9 @@ class HomeTest(ZulipTestCase):
self.login_user(user_profile)
with patch('logging.warning') as mock:
result = self._get_home_page()
- mock.assert_called_once_with('User %s has invalid pointer 999999' % (user_profile.id,))
+ mock.assert_called_once_with(
+ 'User %s has invalid pointer %s', user_profile.id, 999999,
+ )
self._sanity_check(result)

def test_topic_narrow(self) -> None:
@@ -720,15 +720,17 @@ class HandlePushNotificationTest(PushNotificationTest):
self.user_profile.id, token)
for _, _, token in gcm_devices:
mock_info.assert_any_call(
- "GCM: Sent %s as %s" % (token, message.id))
+ "GCM: Sent %s as %s", token, message.id,
+ )

# Now test the unregistered case
mock_apns.get_notification_result.return_value = ('Unregistered', 1234567)
handle_push_notification(self.user_profile.id, missed_message)
for _, _, token in apns_devices:
mock_info.assert_any_call(
- "APNs: Removing invalid/expired token %s (%s)" %
- (token, "Unregistered"))
+ "APNs: Removing invalid/expired token %s (%s)",
+ token, "Unregistered",
+ )
self.assertEqual(RemotePushDeviceToken.objects.filter(kind=PushDeviceToken.APNS).count(), 0)

def test_connection_error(self) -> None:
@@ -978,9 +980,10 @@ class HandlePushNotificationTest(PushNotificationTest):
with mock.patch('zerver.lib.push_notifications.logger.error') as mock_logger, \
mock.patch('zerver.lib.push_notifications.push_notifications_enabled', return_value = True) as mock_push_notifications:
handle_push_notification(self.user_profile.id, missed_message)
- mock_logger.assert_called_with("Could not find UserMessage with "
- "message_id %s and user_id %s" %
- (message_id, self.user_profile.id,))
+ mock_logger.assert_called_with(
+ "Could not find UserMessage with message_id %s and user_id %s",
+ message_id, self.user_profile.id,
+ )
mock_push_notifications.assert_called_once()

def test_user_message_soft_deactivated(self) -> None:
@@ -1659,8 +1662,8 @@ class GCMSendTest(PushNotificationTest):
data = self.get_gcm_data()
send_android_push_notification_to_user(self.user_profile, data, {})
self.assertEqual(mock_info.call_count, 2)
- c1 = call("GCM: Sent 1111 as 0")
- c2 = call("GCM: Sent 2222 as 1")
+ c1 = call("GCM: Sent %s as %s", "1111", 0)
+ c2 = call("GCM: Sent %s as %s", "2222", 1)
mock_info.assert_has_calls([c1, c2], any_order=True)
mock_warning.assert_not_called()

@@ -1672,8 +1675,9 @@ class GCMSendTest(PushNotificationTest):

data = self.get_gcm_data()
send_android_push_notification_to_user(self.user_profile, data, {})
- mock_warning.assert_called_once_with("GCM: Got canonical ref but it "
- "already matches our ID 1!")
+ mock_warning.assert_called_once_with(
+ "GCM: Got canonical ref but it already matches our ID %s!", 1,
+ )

@mock.patch('zerver.lib.push_notifications.logger.warning')
def test_canonical_pushdevice_not_present(self, mock_warning: mock.MagicMock,
@@ -1697,7 +1701,7 @@ class GCMSendTest(PushNotificationTest):
msg = ("GCM: Got canonical ref %s "
"replacing %s but new ID not "
"registered! Updating.")
- mock_warning.assert_called_once_with(msg % (t2, t1))
+ mock_warning.assert_called_once_with(msg, t2, t1)

self.assertEqual(get_count('1111'), 0)
self.assertEqual(get_count('3333'), 1)
@@ -1722,7 +1726,8 @@ class GCMSendTest(PushNotificationTest):
data = self.get_gcm_data()
send_android_push_notification_to_user(self.user_profile, data, {})
mock_info.assert_called_once_with(
- "GCM: Got canonical ref %s, dropping %s" % (new_token, old_token))
+ "GCM: Got canonical ref %s, dropping %s", new_token, old_token,
+ )

self.assertEqual(get_count('1111'), 0)
self.assertEqual(get_count('2222'), 1)
@@ -1743,7 +1748,7 @@ class GCMSendTest(PushNotificationTest):

data = self.get_gcm_data()
send_android_push_notification_to_user(self.user_profile, data, {})
- mock_info.assert_called_once_with("GCM: Removing %s" % (token,))
+ mock_info.assert_called_once_with("GCM: Removing %s", token)
self.assertEqual(get_count('1111'), 0)

@mock.patch('zerver.lib.push_notifications.logger.warning')
@@ -1755,7 +1760,7 @@ class GCMSendTest(PushNotificationTest):

data = self.get_gcm_data()
send_android_push_notification_to_user(self.user_profile, data, {})
- c1 = call("GCM: Delivery to %s failed: Failed" % (token,))
+ c1 = call("GCM: Delivery to %s failed: %s", token, "Failed")
mock_warn.assert_has_calls([c1], any_order=True)

class TestClearOnRead(ZulipTestCase):
@@ -414,9 +414,10 @@ class WorkerTest(ZulipTestCase):
fake_client.queue.append(('email_mirror', data[0]))
worker.start()
self.assertEqual(mock_mirror_email.call_count, 4)
- expected_warn = "Deadlock trying to incr_ratelimit for RateLimitedRealmMirror:%s" % (
- realm.string_id,)
- mock_warn.assert_called_with(expected_warn)
+ mock_warn.assert_called_with(
+ "Deadlock trying to incr_ratelimit for %s",
+ "RateLimitedRealmMirror:%s" % (realm.string_id,),
+ )

def test_email_sending_worker_retries(self) -> None:
"""Tests the retry_send_email_failures decorator to make sure it
@@ -501,7 +502,9 @@ class WorkerTest(ZulipTestCase):
with patch('logging.warning') as logging_warning_mock:
worker.start()
logging_warning_mock.assert_called_once_with(
- "Attempted to sign up already existing email to list: foo@bar.baz")
+ "Attempted to sign up already existing email to list: %s",
+ "foo@bar.baz",
+ )

def test_signups_bad_request(self) -> None:
fake_client = self.FakeClient()
@@ -2273,8 +2273,8 @@ class UserSignUpTest(InviteUserBase):
self._assert_redirected_to(result, '/config-error/smtp')

self.assertEqual(
- err.call_args_list[0][0][0],
- 'Error in accounts_home: uh oh'
+ err.call_args_list[0][0],
+ ('Error in accounts_home: %s', 'uh oh'),
)

def test_bad_email_configuration_for_create_realm(self) -> None:
@@ -2296,8 +2296,8 @@ class UserSignUpTest(InviteUserBase):
self._assert_redirected_to(result, '/config-error/smtp')

self.assertEqual(
- err.call_args_list[0][0][0],
- 'Error in create_realm: uh oh'
+ err.call_args_list[0][0],
+ ('Error in create_realm: %s', 'uh oh'),
)

def test_user_default_language_and_timezone(self) -> None:
@@ -3310,7 +3310,10 @@ class UserSignUpTest(InviteUserBase):
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
- mock_warning.assert_called_once_with("New account email newuser@zulip.com could not be found in LDAP")
+ mock_warning.assert_called_once_with(
+ "New account email %s could not be found in LDAP",
+ "newuser@zulip.com",
+ )

result = self.submit_reg_form_for_user(email,
password,
@@ -3396,7 +3399,10 @@ class UserSignUpTest(InviteUserBase):
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
- mock_warning.assert_called_once_with("New account email nonexistent@zulip.com could not be found in LDAP")
+ mock_warning.assert_called_once_with(
+ "New account email %s could not be found in LDAP",
+ "nonexistent@zulip.com",
+ )

result = self.submit_reg_form_for_user(email,
password,
@@ -207,9 +207,9 @@ class ClientDescriptor:
clear_descriptor_by_handler_id(self.current_handler_id, None)
clear_handler_by_id(self.current_handler_id)
if client_closed:
- logging.info("Client disconnected for queue %s (%s via %s)" %
- (self.event_queue.id, self.user_profile_id,
- self.current_client_name))
+ logging.info("Client disconnected for queue %s (%s via %s)",
+ self.event_queue.id, self.user_profile_id,
+ self.current_client_name)
self.current_handler_id = None
self.current_client_name = None
if self._timeout_handle is not None:
@@ -430,10 +430,10 @@ def gc_event_queues(port: int) -> None:
do_gc_event_queues(to_remove, affected_users, affected_realms)

if settings.PRODUCTION:
- logging.info(('Tornado %d removed %d expired event queues owned by %d users in %.3fs.' +
- ' Now %d active queues, %s')
- % (port, len(to_remove), len(affected_users), time.time() - start,
- len(clients), handler_stats_string()))
+ logging.info('Tornado %d removed %d expired event queues owned by %d users in %.3fs.'
+ ' Now %d active queues, %s',
+ port, len(to_remove), len(affected_users), time.time() - start,
+ len(clients), handler_stats_string())
statsd.gauge('tornado.active_queues', len(clients))
statsd.gauge('tornado.active_users', len(user_clients))
@@ -454,8 +454,8 @@ def dump_event_queues(port: int) -> None:
ujson.dump([(qid, client.to_dict()) for (qid, client) in clients.items()],
stored_queues)

- logging.info('Tornado %d dumped %d event queues in %.3fs'
- % (port, len(clients), time.time() - start))
+ logging.info('Tornado %d dumped %d event queues in %.3fs',
+ port, len(clients), time.time() - start)

def load_event_queues(port: int) -> None:
global clients
@@ -481,8 +481,8 @@ def load_event_queues(port: int) -> None:

add_to_client_dicts(client)

- logging.info('Tornado %d loaded %d event queues in %.3fs'
- % (port, len(clients), time.time() - start))
+ logging.info('Tornado %d loaded %d event queues in %.3fs',
+ port, len(clients), time.time() - start)

def send_restart_events(immediate: bool=False) -> None:
event: Dict[str, Any] = dict(type='restart', server_generation=settings.SERVER_GENERATION)
@@ -572,8 +572,8 @@ def fetch_events(query: Mapping[str, Any]) -> Dict[str, Any]:
# After this point, dont_block=False, the queue is empty, and we
# have a pre-existing queue, so we wait for new events.
if was_connected:
- logging.info("Disconnected handler for queue %s (%s/%s)" % (queue_id, user_profile_id,
- client_type_name))
+ logging.info("Disconnected handler for queue %s (%s/%s)",
+ queue_id, user_profile_id, client_type_name)
except JsonableError as e:
return dict(type="error", exception=e)
@@ -608,8 +608,8 @@ def request_event_queue(user_profile: UserProfile, user_client: Client, apply_ma
data=req)
except requests.adapters.ConnectionError:
logging.error('Tornado server does not seem to be running, check %s '
- 'and %s for more information.' %
- (settings.ERROR_FILE_LOG_PATH, "tornado.log"))
+ 'and %s for more information.',
+ settings.ERROR_FILE_LOG_PATH, "tornado.log")
raise requests.adapters.ConnectionError(
"Django cannot connect to Tornado server (%s); try restarting" %
(tornado_uri,))
@@ -1083,14 +1083,16 @@ def process_notification(notice: Mapping[str, Any]) -> None:
process_presence_event(event, cast(Iterable[int], users))
else:
process_event(event, cast(Iterable[int], users))
- logging.debug("Tornado: Event %s for %s users took %sms" % (
- event['type'], len(users), int(1000 * (time.time() - start_time))))
+ logging.debug(
+ "Tornado: Event %s for %s users took %sms",
+ event['type'], len(users), int(1000 * (time.time() - start_time)),
+ )

def get_wrapped_process_notification(queue_name: str) -> Callable[[Dict[str, Any]], None]:
def failure_processor(notice: Dict[str, Any]) -> None:
logging.error(
- "Maximum retries exceeded for Tornado notice:%s\nStack trace:\n%s\n" % (
- notice, traceback.format_exc()))
+ "Maximum retries exceeded for Tornado notice:%s\nStack trace:\n%s\n",
+ notice, traceback.format_exc())

def wrapped_process_notification(notice: Dict[str, Any]) -> None:
try:
@@ -68,8 +68,8 @@ class InstrumentedPoll:
if total > 0:
percent_busy = 100 * (1 - in_poll / total)
if settings.PRODUCTION:
- logging.info('Tornado %s %5.1f%% busy over the past %4.1f seconds'
- % (logging_data.get('port', 'unknown'), percent_busy, total))
+ logging.info('Tornado %s %5.1f%% busy over the past %4.1f seconds',
+ logging_data.get('port', 'unknown'), percent_busy, total)
self._last_print = t1

return result
@@ -461,8 +461,8 @@ def start_social_login(request: HttpRequest, backend: str, extra_arg: Optional[s
# This backend requires the name of the IdP (from the list of configured ones)
# to be passed as the parameter.
if not extra_arg or extra_arg not in settings.SOCIAL_AUTH_SAML_ENABLED_IDPS:
- logging.info("Attempted to initiate SAML authentication with wrong idp argument: {}"
- .format(extra_arg))
+ logging.info("Attempted to initiate SAML authentication with wrong idp argument: %s",
+ extra_arg)
return redirect_to_config_error("saml")
extra_url_params = {'idp': extra_arg}

@@ -485,8 +485,8 @@ def start_social_signup(request: HttpRequest, backend: str, extra_arg: Optional[
return result

if not extra_arg or extra_arg not in settings.SOCIAL_AUTH_SAML_ENABLED_IDPS:
- logging.info("Attempted to initiate SAML authentication with wrong idp argument: {}"
- .format(extra_arg))
+ logging.info("Attempted to initiate SAML authentication with wrong idp argument: %s",
+ extra_arg)
return redirect_to_config_error("saml")
extra_url_params = {'idp': extra_arg}
return oauth_redirect_to_root(request, backend_url, 'social', is_signup=True,
@@ -502,13 +502,13 @@ def log_into_subdomain(request: HttpRequest, token: str) -> HttpResponse:
result data that has been stored in redis, associated with this token.
"""
if not has_api_key_format(token): # The tokens are intended to have the same format as API keys.
- logging.warning("log_into_subdomain: Malformed token given: %s" % (token,))
+ logging.warning("log_into_subdomain: Malformed token given: %s", token)
return HttpResponse(status=400)

try:
result = ExternalAuthResult(login_token=token)
except ExternalAuthResult.InvalidTokenError:
- logging.warning("log_into_subdomain: Invalid token given: %s" % (token,))
+ logging.warning("log_into_subdomain: Invalid token given: %s", token)
return render(request, 'zerver/log_into_subdomain_token_invalid.html', status=400)

subdomain = get_subdomain(request)
@@ -226,7 +226,7 @@ def home_real(request: HttpRequest) -> HttpResponse:
latest_read = get_usermessage_by_message_id(user_profile, user_profile.pointer)
if latest_read is None:
# Don't completely fail if your saved pointer ID is invalid
- logging.warning("User %s has invalid pointer %s" % (user_profile.id, user_profile.pointer))
+ logging.warning("User %s has invalid pointer %s", user_profile.id, user_profile.pointer)
furthest_read_time = sent_time_in_epoch_seconds(latest_read)

# We pick a language for the user as follows:
@@ -136,7 +136,7 @@ def accounts_register(request: HttpRequest) -> HttpResponse:
try:
ldap_username = backend.django_to_ldap_username(email)
except ZulipLDAPExceptionNoMatchingLDAPUser:
- logging.warning("New account email %s could not be found in LDAP" % (email,))
+ logging.warning("New account email %s could not be found in LDAP", email)
break

# Note that this `ldap_user` object is not a
@@ -353,8 +353,10 @@ def accounts_register(request: HttpRequest) -> HttpResponse:
use_dummy_backend=True)
if return_data.get('invalid_subdomain'):
# By construction, this should never happen.
- logging.error("Subdomain mismatch in registration %s: %s" % (
- realm.subdomain, user_profile.delivery_email,))
+ logging.error(
+ "Subdomain mismatch in registration %s: %s",
+ realm.subdomain, user_profile.delivery_email,
+ )
return redirect('/')

return login_and_go_to_home(request, auth_result)
@@ -464,7 +466,7 @@ def create_realm(request: HttpRequest, creation_key: Optional[str]=None) -> Http
try:
send_confirm_registration_email(email, activation_url, request.LANGUAGE_CODE)
except smtplib.SMTPException as e:
- logging.error('Error in create_realm: %s' % (str(e),))
+ logging.error('Error in create_realm: %s', str(e))
return HttpResponseRedirect("/config-error/smtp")

if key_record is not None:
@@ -505,7 +507,7 @@ def accounts_home(request: HttpRequest, multiuse_object_key: Optional[str]="",
try:
send_confirm_registration_email(email, activation_url, request.LANGUAGE_CODE)
except smtplib.SMTPException as e:
- logging.error('Error in accounts_home: %s' % (str(e),))
+ logging.error('Error in accounts_home: %s', str(e))
return HttpResponseRedirect("/config-error/smtp")

return HttpResponseRedirect(reverse('signup_send_confirm', kwargs={'email': email}))
@@ -163,14 +163,15 @@ def report_csp_violations(request: HttpRequest,
"Blocked URI('%s'), Original Policy('%s'), "
"Violated Directive('%s'), Effective Directive('%s'), "
"Disposition('%s'), Referrer('%s'), "
- "Status Code('%s'), Script Sample('%s')" % (get_attr('document-uri'),
- get_attr('blocked-uri'),
- get_attr('original-policy'),
- get_attr('violated-directive'),
- get_attr('effective-directive'),
- get_attr('disposition'),
- get_attr('referrer'),
- get_attr('status-code'),
- get_attr('script-sample')))
+ "Status Code('%s'), Script Sample('%s')",
+ get_attr('document-uri'),
+ get_attr('blocked-uri'),
+ get_attr('original-policy'),
+ get_attr('violated-directive'),
+ get_attr('effective-directive'),
+ get_attr('disposition'),
+ get_attr('referrer'),
+ get_attr('status-code'),
+ get_attr('script-sample'))

return json_success()
@@ -119,7 +119,7 @@ def api_teamcity_webhook(request: HttpRequest, user_profile: UserProfile,
if teamcity_user is None:
# We can't figure out who started this build - there's nothing we can do here.
logging.info("Teamcity webhook couldn't find a matching Zulip user for "
- "Teamcity user '%s' or '%s'" % (teamcity_fullname, teamcity_shortname))
+ "Teamcity user '%s' or '%s'", teamcity_fullname, teamcity_shortname)
return json_success()

body = "Your personal build for {}".format(body)
@@ -255,8 +255,10 @@ class SignupWorker(QueueProcessingWorker):
def consume(self, data: Dict[str, Any]) -> None:
# TODO: This is the only implementation with Dict cf Mapping; should we simplify?
user_profile = get_user_profile_by_id(data['user_id'])
- logging.info("Processing signup for user %s in realm %s" % (
- user_profile.id, user_profile.realm.string_id))
+ logging.info(
+ "Processing signup for user %s in realm %s",
+ user_profile.id, user_profile.realm.string_id,
+ )
if settings.MAILCHIMP_API_KEY and settings.PRODUCTION:
endpoint = "https://%s.api.mailchimp.com/3.0/lists/%s/members" % \
(settings.MAILCHIMP_API_KEY.split('-')[1], settings.ZULIP_FRIENDS_LIST_ID)
@@ -266,8 +268,8 @@ class SignupWorker(QueueProcessingWorker):
params['status'] = 'subscribed'
r = requests.post(endpoint, auth=('apikey', settings.MAILCHIMP_API_KEY), json=params, timeout=10)
if r.status_code == 400 and ujson.loads(r.text)['title'] == 'Member Exists':
- logging.warning("Attempted to sign up already existing email to list: %s" %
- (data['email_address'],))
+ logging.warning("Attempted to sign up already existing email to list: %s",
+ data['email_address'])
elif r.status_code == 400:
retry_event(self.queue_name, data, lambda e: r.raise_for_status())
else:
@@ -288,7 +290,7 @@ class ConfirmationEmailWorker(QueueProcessingWorker):
return

referrer = get_user_profile_by_id(data["referrer_id"])
- logger.info("Sending invitation for realm %s to %s" % (referrer.realm.string_id, invitee.email))
+ logger.info("Sending invitation for realm %s to %s", referrer.realm.string_id, invitee.email)
activate_url = do_send_confirmation_email(invitee, referrer)

# queue invitation reminder
@@ -378,7 +380,7 @@ class UserActivityIntervalWorker(QueueProcessingWorker):
@assign_queue('user_presence')
class UserPresenceWorker(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
- logging.debug("Received presence event: %s" % (event,),)
+ logging.debug("Received presence event: %s", event)
user_profile = get_user_profile_by_id(event["user_profile_id"])
client = get_client(event["client"])
log_time = timestamp_to_datetime(event["time"])
@@ -406,7 +408,7 @@ class MissedMessageWorker(QueueProcessingWorker):
batch_start_by_recipient: Dict[int, float] = {}

def consume(self, event: Dict[str, Any]) -> None:
- logging.debug("Received missedmessage_emails event: %s" % (event,))
+ logging.debug("Received missedmessage_emails event: %s", event)

# When we process an event, just put it into the queue and ensure we have a timer going.
user_profile_id = event['user_profile_id']
@@ -435,8 +437,8 @@ class MissedMessageWorker(QueueProcessingWorker):
if current_time - timestamp < self.BATCH_DURATION:
continue
events = self.events_by_recipient[user_profile_id]
- logging.info("Batch-processing %s missedmessage_emails events for user %s" %
- (len(events), user_profile_id))
+ logging.info("Batch-processing %s missedmessage_emails events for user %s",
+ len(events), user_profile_id)
handle_missedmessage_emails(user_profile_id, events)
del self.events_by_recipient[user_profile_id]
del self.batch_start_by_recipient[user_profile_id]
@@ -481,14 +483,14 @@ class PushNotificationsWorker(QueueProcessingWorker): # nocoverage
except PushNotificationBouncerRetryLaterError:
def failure_processor(event: Dict[str, Any]) -> None:
logger.warning(
- "Maximum retries exceeded for trigger:%s event:push_notification" % (
- event['user_profile_id'],))
+ "Maximum retries exceeded for trigger:%s event:push_notification",
+ event['user_profile_id'])
retry_event(self.queue_name, event, failure_processor)

@assign_queue('error_reports')
class ErrorReporter(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None:
- logging.info("Processing traceback with type %s for %s" % (event['type'], event.get('user_email')))
+ logging.info("Processing traceback with type %s for %s", event['type'], event.get('user_email'))
if settings.ERROR_REPORTING:
do_report_error(event['report']['host'], event['type'], event['report'])

@@ -501,7 +503,7 @@ class SlowQueryWorker(LoopQueueProcessingWorker):

def consume_batch(self, slow_query_events: List[Dict[str, Any]]) -> None:
for event in slow_query_events:
- logging.info("Slow query: %s" % (event["query"],))
+ logging.info("Slow query: %s", event["query"])

if settings.SLOW_QUERY_LOGS_STREAM is None:
return
@@ -535,7 +537,7 @@ class DigestWorker(QueueProcessingWorker): # nocoverage
# Who gets a digest is entirely determined by the enqueue_digest_emails
# management command, not here.
def consume(self, event: Mapping[str, Any]) -> None:
- logging.info("Received digest event: %s" % (event,))
+ logging.info("Received digest event: %s", event)
handle_digest_email(event["user_profile_id"], event["cutoff"])

@assign_queue('email_mirror')
@@ -551,8 +553,8 @@ class MirrorWorker(QueueProcessingWorker):
except RateLimited:
msg = email.message_from_string(event["message"])
logger.warning("MirrorWorker: Rejecting an email from: %s "
- "to realm: %s - rate limited."
- % (msg['From'], recipient_realm.name))
+ "to realm: %s - rate limited.",
+ msg['From'], recipient_realm.name)
return

mirror_email(email.message_from_string(event["message"]),
@@ -567,7 +569,7 @@ class TestWorker(QueueProcessingWorker):
def consume(self, event: Mapping[str, Any]) -> None: # nocoverage
fn = settings.ZULIP_WORKER_TEST_FILE
message = ujson.dumps(event)
- logging.info("TestWorker should append this message to %s: %s" % (fn, message))
+ logging.info("TestWorker should append this message to %s: %s", fn, message)
with open(fn, 'a') as f:
f.write(message + '\n')
@@ -635,8 +637,10 @@ class EmbeddedBotWorker(QueueProcessingWorker):
for service in services:
bot_handler = get_bot_handler(str(service.name))
if bot_handler is None:
- logging.error("Error: User %s has bot with invalid embedded bot service %s" % (
- user_profile_id, service.name))
+ logging.error(
+ "Error: User %s has bot with invalid embedded bot service %s",
+ user_profile_id, service.name,
+ )
continue
try:
if hasattr(bot_handler, 'initialize'):
@@ -674,8 +678,8 @@ class DeferredWorker(QueueProcessingWorker):
except PushNotificationBouncerRetryLaterError:
def failure_processor(event: Dict[str, Any]) -> None:
logger.warning(
- "Maximum retries exceeded for trigger:%s event:clear_push_device_tokens" % (
- event['user_profile_id'],))
+ "Maximum retries exceeded for trigger:%s event:clear_push_device_tokens",
+ event['user_profile_id'])
retry_event(self.queue_name, event, failure_processor)
elif event['type'] == 'realm_export':
start = time.time()
@@ -693,8 +697,10 @@ class DeferredWorker(QueueProcessingWorker):
failed_timestamp=timezone_now().timestamp()
))
export_event.save(update_fields=['extra_data'])
- logging.error("Data export for %s failed after %s" % (
- user_profile.realm.string_id, time.time() - start))
+ logging.error(
+ "Data export for %s failed after %s",
+ user_profile.realm.string_id, time.time() - start,
+ )
notify_realm_export(user_profile)
return
@@ -720,5 +726,7 @@ class DeferredWorker(QueueProcessingWorker):
# For future frontend use, also notify administrator
# clients that the export happened.
notify_realm_export(user_profile)
- logging.info("Completed data export for %s in %s" % (
- user_profile.realm.string_id, time.time() - start))
+ logging.info(
+ "Completed data export for %s in %s",
+ user_profile.realm.string_id, time.time() - start,
+ )
@@ -45,7 +45,7 @@ def profile_request(request: HttpRequest) -> HttpResponse:
with tempfile.NamedTemporaryFile(prefix='profile.data.', delete=False) as stats_file:
prof.dump_stats(stats_file.name)
request_logger.process_response(request, ret)
- logging.info("Profiling data written to {}".format(stats_file.name))
+ logging.info("Profiling data written to %s", stats_file.name)
return ret

class Command(ZulipBaseCommand):
@@ -176,8 +176,10 @@ def batch_create_table_data(server: RemoteZulipServer, model: Any,
try:
model.objects.bulk_create(row_objects[:BATCH_SIZE])
except IntegrityError:
- logging.warning("Invalid data saving %s for server %s/%s" % (
- model._meta.db_table, server.hostname, server.uuid))
+ logging.warning(
+ "Invalid data saving %s for server %s/%s",
+ model._meta.db_table, server.hostname, server.uuid,
+ )
raise JsonableError(_("Invalid data."))
row_objects = row_objects[BATCH_SIZE:]
@@ -433,7 +433,7 @@ class ZulipLDAPAuthBackendBase(ZulipAuthMixin, LDAPBackend):
# This is possible, but strange, so worth logging a warning about.
# We can't translate the email to a unique username,
# so we don't do anything else here.
- logging.warning("Multiple users with email {} found in LDAP.".format(username))
+ logging.warning("Multiple users with email %s found in LDAP.", username)
result = username

if _LDAPUser(self, result).attrs is None:
@@ -509,8 +509,8 @@ class ZulipLDAPAuthBackendBase(ZulipAuthMixin, LDAPBackend):
user.avatar_hash = user_avatar_content_hash(ldap_avatar)
user.save(update_fields=["avatar_hash"])
else:
- logging.warning("Could not parse %s field for user %s" %
- (avatar_attr_name, user.id))
+ logging.warning("Could not parse %s field for user %s",
+ avatar_attr_name, user.id)

def is_account_control_disabled_user(self, ldap_user: _LDAPUser) -> bool:
"""Implements the userAccountControl check for whether a user has been
@@ -761,14 +761,14 @@ class ZulipLDAPUserPopulator(ZulipLDAPAuthBackendBase):
user_disabled_in_ldap = self.is_account_control_disabled_user(ldap_user)
if user_disabled_in_ldap:
if user.is_active:
- logging.info("Deactivating user %s because they are disabled in LDAP." %
- (user.delivery_email,))
+ logging.info("Deactivating user %s because they are disabled in LDAP.",
+ user.delivery_email)
do_deactivate_user(user)
# Do an early return to avoid trying to sync additional data.
return (user, built)
elif not user.is_active:
- logging.info("Reactivating user %s because they are not disabled in LDAP." %
- (user.delivery_email,))
+ logging.info("Reactivating user %s because they are not disabled in LDAP.",
+ user.delivery_email)
do_reactivate_user(user)

self.sync_avatar_from_ldap(user, ldap_user)
@@ -801,10 +801,10 @@ def sync_user_from_ldap(user_profile: UserProfile, logger: logging.Logger) -> bo
except ZulipLDAPExceptionNoMatchingLDAPUser:
if settings.LDAP_DEACTIVATE_NON_MATCHING_USERS:
do_deactivate_user(user_profile)
- logger.info("Deactivated non-matching user: %s" % (user_profile.delivery_email,))
+ logger.info("Deactivated non-matching user: %s", user_profile.delivery_email)
return True
elif user_profile.is_active:
- logger.warning("Did not find %s in LDAP." % (user_profile.delivery_email,))
+ logger.warning("Did not find %s in LDAP.", user_profile.delivery_email)
return False

# What one would expect to see like to do here is just a call to
@@ -824,7 +824,7 @@ def sync_user_from_ldap(user_profile: UserProfile, logger: logging.Logger) -> bo
# making this flow possible in a more directly supported fashion.
updated_user = ZulipLDAPUser(backend, ldap_username, realm=user_profile.realm).populate_user()
if updated_user:
- logger.info("Updated %s." % (user_profile.delivery_email,))
+ logger.info("Updated %s.", user_profile.delivery_email)
return True

raise PopulateUserLDAPError("populate_user unexpectedly returned {}".format(updated_user))
@ -1075,8 +1075,8 @@ def social_associate_user_helper(backend: BaseAuth, return_data: Dict[str, Any],
|
||||||
if verified_emails_length == 0:
|
if verified_emails_length == 0:
|
||||||
# TODO: Provide a nice error message screen to the user
|
# TODO: Provide a nice error message screen to the user
|
||||||
# for this case, rather than just logging a warning.
|
# for this case, rather than just logging a warning.
|
||||||
logging.warning("Social auth (%s) failed because user has no verified emails" %
|
logging.warning("Social auth (%s) failed because user has no verified emails",
|
||||||
(backend.auth_backend_name,))
|
backend.auth_backend_name)
|
||||||
return_data["email_not_verified"] = True
|
return_data["email_not_verified"] = True
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
@@ -1114,8 +1114,8 @@ def social_associate_user_helper(backend: BaseAuth, return_data: Dict[str, Any],
# end up with a wrong email associated with the account. The below code
# takes care of that.
logging.warning("Social auth (%s) failed because user has no verified"
" emails associated with the account" %
" emails associated with the account",
(backend.auth_backend_name,))
backend.auth_backend_name)
return_data["email_not_associated"] = True
return None

@@ -1230,7 +1230,7 @@ def social_auth_finish(backend: Any,
# In case of invalid email, we will end up on registration page.
# This seems better than redirecting to login page.
logging.warning(
"{} got invalid email argument.".format(backend.auth_backend_name)
"%s got invalid email argument.", backend.auth_backend_name,
)
return None

@@ -1504,7 +1504,7 @@ class SAMLAuthBackend(SocialAuthMixin, SAMLAuth):
except KeyError as e:
# If the above raise KeyError, it means invalid or no idp was specified,
# we should log that and redirect to the login page.
logging.info("/login/saml/ : Bad idp param: KeyError: {}.".format(e))
logging.info("/login/saml/ : Bad idp param: KeyError: %s.", str(e))
return reverse('zerver.views.auth.login_page',
kwargs = {'template_name': 'zerver/login.html'})

@@ -1575,14 +1575,14 @@ class SAMLAuthBackend(SocialAuthMixin, SAMLAuth):
subdomain = relayed_params.get("subdomain")
if idp_name is None or subdomain is None:
error_msg = "Missing idp or subdomain value in relayed_params in SAML auth_complete: %s"
logging.info(error_msg % (dict(subdomain=subdomain, idp=idp_name),))
logging.info(error_msg, dict(subdomain=subdomain, idp=idp_name))
return None

idp_valid = self.validate_idp_for_subdomain(idp_name, subdomain)
if not idp_valid:
error_msg = "User authenticated with IdP %s but this provider is not " + \
"enabled for this realm %s."
logging.info(error_msg % (idp_name, subdomain))
logging.info(error_msg, idp_name, subdomain)
return None

result = None
@@ -94,5 +94,5 @@ class EmailLogBackEnd(BaseEmailBackend):
if settings.DEVELOPMENT_LOG_EMAILS:
self.log_email(email)
email_log_url = settings.ROOT_DOMAIN_URI + "/emails"
logging.info("Emails sent in development are available at %s" % (email_log_url,))
logging.info("Emails sent in development are available at %s", email_log_url)
return len(email_messages)
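The pattern applied in every hunk above is the same: instead of building the message with "%" or str.format() before calling the logger, the format string and its arguments are passed as separate parameters, so interpolation is deferred until the logging machinery decides the record will actually be emitted. The following is a minimal standalone sketch of that difference, not code from this repository; the Noisy class and the "value: %s" message are made up for illustration.

    import logging

    logging.basicConfig(level=logging.WARNING)  # INFO records will be dropped
    logger = logging.getLogger(__name__)

    class Noisy:
        def __str__(self) -> str:
            # Stands in for any value whose string conversion is expensive.
            print("formatting happened")
            return "noisy"

    # Eager: "%" runs before logger.info is called, so Noisy.__str__ executes
    # even though the INFO record is discarded by the WARNING threshold.
    logger.info("value: %s" % (Noisy(),))

    # Deferred: the format string and argument are stored on the LogRecord,
    # and "%" is applied only if the record is actually emitted; with INFO
    # disabled here, Noisy.__str__ never runs.
    logger.info("value: %s", Noisy())

A secondary effect of the deferred form is that the un-interpolated format string is kept as the record's msg attribute, which tends to help log aggregation tools group occurrences of the same message.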