black: Reformat with Black 23.

Black 23 enforces some slightly stricter rules about empty line
counts and removal of redundant parentheses, but the result is still
compatible with Black 22.

(This does not actually upgrade our Python environment to Black 23
yet.)

Signed-off-by: Anders Kaseorg <anders@zulip.com>
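A minimal sketch of the removal patterns repeated throughout this diff,
using an invented lookup() function (not code from this commit; builtin
generic types are used so the snippet needs no imports):

-def lookup(pairs: list[tuple[int, str]]) -> (dict[int, str]):
-
-    result = {}
-    for (key, value) in pairs:
+def lookup(pairs: list[tuple[int, str]]) -> dict[int, str]:
+    result = {}
+    for key, value in pairs:
         result[key] = value
     return result

Black 23 drops the blank line at the start of the function body (one of the
empty-line rules), the redundant parentheses around the return annotation,
and the parentheses around tuple unpacking in the for-loop header; Black 22
leaves that reformatted output unchanged, which is why the result stays
compatible. The scattered one-line additions (hunks such as @@ -346,6 +346,7 @@)
appear to be the other empty-line rule, inserting a blank line before a
comment that directly precedes a definition, as in one of the hunks below:

 Consumer = Callable[[ChannelT, Basic.Deliver, pika.BasicProperties, bytes], None]
+
 # This simple queuing library doesn't expose much of the power of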
Anders Kaseorg 2023-02-01 19:35:24 -08:00 committed by Tim Abbott
parent bbf5b3d6ce
commit df001db1a9
581 changed files with 57 additions and 721 deletions

@@ -288,6 +288,7 @@ def do_aggregate_to_summary_table(
## Utility functions called from outside counts.py ##
# called from zerver.actions; should not throw any errors
def do_increment_logging_stat(
zerver_object: Union[Realm, UserProfile, Stream],

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0030_realm_org_type"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),

@@ -2,7 +2,6 @@ from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("analytics", "0001_initial"),
]

@@ -2,7 +2,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("analytics", "0002_remove_huddlecount"),
]

@@ -2,7 +2,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("analytics", "0003_fillstate"),
]

@@ -2,7 +2,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("analytics", "0004_add_subgroup"),
]

@@ -2,7 +2,6 @@ from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("analytics", "0005_alter_field_size"),
]

@@ -3,7 +3,6 @@ from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("analytics", "0006_add_subgroup_to_unique_constraints"),
]

@@ -3,7 +3,6 @@ from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0050_userprofile_avatar_version"),
("analytics", "0007_remove_interval"),

@@ -21,7 +21,6 @@ def delete_messages_sent_to_stream_stat(
class Migration(migrations.Migration):
dependencies = [
("analytics", "0008_add_count_indexes"),
]

@@ -21,7 +21,6 @@ def clear_message_sent_by_message_type_values(
class Migration(migrations.Migration):
dependencies = [("analytics", "0009_remove_messages_to_stream_stat")]
operations = [

@@ -18,7 +18,6 @@ def clear_analytics_tables(apps: StateApps, schema_editor: BaseDatabaseSchemaEdi
class Migration(migrations.Migration):
dependencies = [
("analytics", "0010_clear_messages_sent_values"),
]

@@ -5,7 +5,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("analytics", "0011_clear_analytics_tables"),
]

@@ -4,7 +4,6 @@ from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("analytics", "0012_add_on_delete"),
]

@@ -4,7 +4,6 @@ from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("analytics", "0013_remove_anomaly"),
]

@@ -55,7 +55,6 @@ def clear_duplicate_counts(apps: StateApps, schema_editor: BaseDatabaseSchemaEdi
class Migration(migrations.Migration):
dependencies = [
("analytics", "0014_remove_fillstate_last_modified"),
]

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("analytics", "0015_clear_duplicate_counts"),
]

@@ -29,7 +29,6 @@ if settings.BILLING_ENABLED:
def make_table(
title: str, cols: Sequence[str], rows: Sequence[Any], has_row_class: bool = False
) -> str:
if not has_row_class:
def fix_row(row: Any) -> Dict[str, Any]:

@@ -3,7 +3,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0001_initial"),
]

@@ -3,7 +3,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("confirmation", "0001_initial"),
]

@@ -3,7 +3,6 @@ from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("confirmation", "0002_realmcreationkey"),
]

@@ -3,7 +3,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("confirmation", "0003_emailchangeconfirmation"),
]

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zerver", "0124_stream_enable_notifications"),
("confirmation", "0004_remove_confirmationmanager"),

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("confirmation", "0005_confirmation_realm"),
]

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("confirmation", "0006_realmcreationkey_presume_email_valid"),
]

@@ -2,7 +2,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("confirmation", "0007_add_indexes"),
]

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("confirmation", "0009_confirmation_expiry_date_backfill"),
]

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("confirmation", "0010_alter_confirmation_expiry_date"),
]

@@ -5,7 +5,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corporate", "0001_initial"),
]

@@ -5,7 +5,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corporate", "0002_customer_default_discount"),
]

@@ -5,7 +5,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corporate", "0003_customerplan"),
]

@@ -5,7 +5,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corporate", "0004_licenseledger"),
]

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corporate", "0005_customerplan_invoicing"),
]

@@ -4,7 +4,6 @@ from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("corporate", "0006_nullable_stripe_customer_id"),
]

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corporate", "0007_remove_deprecated_fields"),
]

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corporate", "0008_nullable_next_invoice_date"),
]

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corporate", "0009_customer_sponsorship_pending"),
]

@@ -6,7 +6,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("zerver", "0333_alter_realm_org_type"),

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corporate", "0012_zulipsponsorshiprequest"),
]

@@ -4,7 +4,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("corporate", "0013_alter_zulipsponsorshiprequest_org_website"),
]

@@ -5,7 +5,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
("corporate", "0014_customerplan_end_date"),

@@ -5,7 +5,6 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("zilencer", "0018_remoterealmauditlog"),
("zerver", "0370_realm_enable_spectator_access"),

@@ -3,7 +3,6 @@ from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0001_initial"),
]

@@ -69,7 +69,7 @@ def update_fts_columns(cursor: psycopg2.extensions.cursor) -> int:
[BATCH_SIZE],
)
ids = []
-for (id, message_id) in cursor.fetchall():
+for id, message_id in cursor.fetchall():
if USING_PGROONGA:
cursor.execute(
"UPDATE zerver_message SET "

@@ -241,7 +241,6 @@ def create_log_entry(
copied_packages: Set[str],
new_packages: Set[str],
) -> None:
venv_path = os.path.dirname(target_log)
with open(target_log, "a") as writer:
writer.write(f"{venv_path}\n")
@@ -289,7 +288,6 @@ def setup_virtualenv(
requirements_file: str,
patch_activate_script: bool = False,
) -> str:
sha1sum = generate_hash(requirements_file)
# Check if a cached version already exists
if target_venv_path is None:
@@ -320,7 +318,6 @@ def add_cert_to_pipconf() -> None:
def do_setup_virtualenv(venv_path: str, requirements_file: str) -> None:
# Set up Python virtualenv
new_packages = set(get_package_names(requirements_file))

@@ -37,7 +37,6 @@ def write_updated_configs() -> None:
with open("/etc/zulip/nginx_sharding_map.conf.tmp", "w") as nginx_sharding_conf_f, open(
"/etc/zulip/sharding.json.tmp", "w"
) as sharding_json_f:
if len(ports) == 1:
nginx_sharding_conf_f.write('map "" $tornado_server {\n')
nginx_sharding_conf_f.write(" default http://tornado;\n")

@@ -194,7 +194,6 @@ def generate_secrets(development: bool = False) -> None:
if __name__ == "__main__":
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(

@@ -74,7 +74,7 @@ def split_for_id_and_class(element: str) -> List[str]:
return lst
-def build_id_dict(templates: List[str]) -> (Dict[str, List[str]]):
+def build_id_dict(templates: List[str]) -> Dict[str, List[str]]:
template_id_dict: (Dict[str, List[str]]) = defaultdict(list)
for fn in templates:

@@ -232,7 +232,6 @@ REPO_STOPWORDS_PATH = os.path.join(
def install_system_deps() -> None:
# By doing list -> set -> list conversion, we remove duplicates.
deps_to_install = sorted(set(SYSTEM_DEPENDENCIES))
@@ -347,7 +346,6 @@ def install_yum_deps(deps_to_install: List[str]) -> None:
def main(options: argparse.Namespace) -> NoReturn:
# yarn and management commands expect to be run from the root of the
# project.
os.chdir(ZULIP_PATH)

@@ -54,7 +54,6 @@ def resolve_conflicts(conflicts: List[str], files_list: List[str]) -> None:
if __name__ == "__main__":
MIGRATIONS_TO_SKIP = {"0209", "0261"}
while True:
conflicts: List[str] = []

@@ -165,7 +165,7 @@ def main() -> None:
all_canonical_names.add(canonical_name)
# STEP 2: We don't support having aliases that collide with canonical names for emoji, so remove them.
-for (emoji_code, emoji_names) in all_emojis.items():
+for emoji_code, emoji_names in all_emojis.items():
# Copy the list to not iterate while elements are being deleted.
aliases = emoji_names["aliases"][:]
for alias in aliases:
@@ -187,7 +187,7 @@ def main() -> None:
# STEP 4: We keep non-ascii (non-"English") characters in some emoji names if that's the correct
# way to spell that word, but always add an alias for an ascii-only version of the word.
-for (emoji_code, emoji_names) in all_emojis.items():
+for emoji_code, emoji_names in all_emojis.items():
for name in [emoji_names["canonical_name"]] + emoji_names["aliases"]:
# These are known names where we don't have an ascii-only version and there are ascii aliases
# that a user can still enter instead to get the same emoji.
@@ -208,7 +208,7 @@ def main() -> None:
"# Generated with `generate_emoji_names`.\n\n"
"EMOJI_NAME_MAPS: Dict[str, Dict[str, Any]] = {\n"
)
-for (key, emoji_names) in all_emojis.items():
+for key, emoji_names in all_emojis.items():
f.write(f" {key!r}: {emoji_names!r},\n")
f.write("}\n")

@@ -29,7 +29,6 @@ class IdentityArgsDict(TypedDict, total=False):
def main() -> None:
session = boto3.session.Session()
from_address = settings.NOREPLY_EMAIL_ADDRESS

@@ -183,7 +183,6 @@ class ParserTest(unittest.TestCase):
self._assert_validate_error("Tag name missing", text=my_html)
def test_code_blocks(self) -> None:
# This is fine.
my_html = """
<code>
@@ -204,7 +203,6 @@ class ParserTest(unittest.TestCase):
self._assert_validate_error("Code tag is split across two lines.", text=my_html)
def test_anchor_blocks(self) -> None:
# This is allowed, although strange.
my_html = """
<a href="/some/url">

@@ -156,7 +156,7 @@ add_log(out_dict, repo_log)
# TODO: We should migrate the last couple repositories to use the
# `main` default branch name and then simplify this.
-for (full_repository, branch) in [
+for full_repository, branch in [
("zulip/zulip-mobile", "main"),
("zulip/zulip-desktop", "main"),
("zulip/docker-zulip", "main"),

@@ -240,10 +240,10 @@ def do_clear_mobile_push_notifications_for_ids(
.values_list("user_profile_id", "message_id")
)
-for (user_id, message_id) in notifications_to_update:
+for user_id, message_id in notifications_to_update:
messages_by_user[user_id].append(message_id)
-for (user_profile_id, event_message_ids) in messages_by_user.items():
+for user_profile_id, event_message_ids in messages_by_user.items():
queue_json_publish(
"missedmessage_mobile_notifications",
{

@@ -407,7 +407,6 @@ def get_service_bot_events(
active_user_ids: Set[int],
recipient_type: int,
) -> Dict[str, List[Dict[str, Any]]]:
event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
# Avoid infinite loops by preventing messages sent by bots from generating
@@ -1157,7 +1156,6 @@ def check_send_message(
*,
skip_stream_access_check: bool = False,
) -> int:
addressee = Addressee.legacy_build(sender, message_type_name, message_to, topic_name)
try:
message = check_message(
@@ -1658,7 +1656,6 @@ def internal_send_stream_message(
email_gateway: bool = False,
limit_unread_user_ids: Optional[Set[int]] = None,
) -> Optional[int]:
message = internal_prep_stream_message(
sender,
stream,

@@ -20,7 +20,7 @@ from zerver.tornado.django_api import send_event
@transaction.atomic(durable=True)
def do_add_realm_domain(
realm: Realm, domain: str, allow_subdomains: bool, *, acting_user: Optional[UserProfile]
-) -> (RealmDomain):
+) -> RealmDomain:
realm_domain = RealmDomain.objects.create(
realm=realm, domain=domain, allow_subdomains=allow_subdomains
)

@@ -313,7 +313,6 @@ def bulk_add_subs_to_db_with_logging(
subs_to_add: List[SubInfo],
subs_to_activate: List[SubInfo],
) -> None:
Subscription.objects.bulk_create(info.sub for info in subs_to_add)
sub_ids = [info.sub.id for info in subs_to_activate]
Subscription.objects.filter(id__in=sub_ids).update(active=True)
@@ -624,7 +623,6 @@ def send_peer_remove_events(
def notify_subscriptions_removed(
realm: Realm, user_profile: UserProfile, streams: Iterable[Stream]
) -> None:
payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
event = dict(type="subscription", op="remove", subscriptions=payload)
send_event(realm, event, [user_profile.id])
@@ -641,7 +639,7 @@ def send_subscription_remove_events(
) -> None:
altered_user_dict: Dict[int, Set[int]] = defaultdict(set)
streams_by_user: Dict[int, List[Stream]] = defaultdict(list)
-for (user, stream) in removed_subs:
+for user, stream in removed_subs:
streams_by_user[user.id].append(stream)
altered_user_dict[stream.id].add(user.id)
@@ -673,7 +671,6 @@ def bulk_remove_subscriptions(
*,
acting_user: Optional[UserProfile],
) -> SubAndRemovedT:
users = list(users)
streams = list(streams)

@@ -11,7 +11,6 @@ from zerver.tornado.django_api import send_event
def do_send_typing_notification(
realm: Realm, sender: UserProfile, recipient_user_profiles: List[UserProfile], operator: str
) -> None:
sender_dict = {"user_id": sender.id, "email": sender.email}
# Include a list of recipients in the event body to help identify where the typing is happening
@@ -67,7 +66,6 @@ def check_send_typing_notification(sender: UserProfile, user_ids: List[int], ope
def do_send_stream_typing_notification(
sender: UserProfile, operator: str, stream: Stream, topic: str
) -> None:
sender_dict = {"user_id": sender.id, "email": sender.email}
event = dict(

@@ -176,7 +176,6 @@ def make_user_messages(
mention_map: Dict[int, Set[int]],
wildcard_mention_map: Mapping[int, bool] = {},
) -> List[ZerverFieldsT]:
zerver_usermessage = []
for message in zerver_message:
@@ -220,7 +219,6 @@ def build_stream_subscriptions(
zerver_recipient: List[ZerverFieldsT],
zerver_stream: List[ZerverFieldsT],
) -> List[ZerverFieldsT]:
subscriptions: List[ZerverFieldsT] = []
stream_ids = {stream["id"] for stream in zerver_stream}
@@ -249,7 +247,6 @@ def build_huddle_subscriptions(
zerver_recipient: List[ZerverFieldsT],
zerver_huddle: List[ZerverFieldsT],
) -> List[ZerverFieldsT]:
subscriptions: List[ZerverFieldsT] = []
huddle_ids = {huddle["id"] for huddle in zerver_huddle}
@@ -274,7 +271,6 @@ def build_huddle_subscriptions(
def build_personal_subscriptions(zerver_recipient: List[ZerverFieldsT]) -> List[ZerverFieldsT]:
subscriptions: List[ZerverFieldsT] = []
personal_recipients = [
@@ -456,7 +452,6 @@ def build_stream(
invite_only: bool = False,
stream_post_policy: int = 1,
) -> ZerverFieldsT:
# Other applications don't have the distinction of "private stream with public history"
# vs "private stream with hidden history" - and we've traditionally imported private "streams"
# of other products as private streams with hidden history.
@@ -783,7 +778,7 @@ def long_term_idle_helper(
recent_senders.add(user)
sender_counts[user] += 1
-for (user, count) in sender_counts.items():
+for user, count in sender_counts.items():
if count > 10:
recent_senders.add(user)

@@ -119,7 +119,6 @@ def convert_user_data(
realm_id: int,
team_name: str,
) -> None:
user_data_list = []
for username in user_data_map:
user = user_data_map[username]
@@ -239,7 +238,6 @@ def convert_huddle_data(
realm_id: int,
team_name: str,
) -> List[ZerverFieldsT]:
zerver_huddle = []
for huddle in huddle_data:
if len(huddle["members"]) > 2:
@@ -545,7 +543,6 @@ def process_posts(
zerver_attachment: List[ZerverFieldsT],
mattermost_data_dir: str,
) -> None:
post_data_list = []
for post in post_data:
if "team" not in post:

@@ -93,7 +93,6 @@ def convert_to_zulip_markdown(
tokens = text.split(" ")
for iterator in range(len(tokens)):
# Check user mentions and change mention format from
# '<@slack_id|short_name>' to '@**full_name**'
if re.findall(SLACK_USERMENTION_REGEX, tokens[iterator], re.VERBOSE):

@@ -102,7 +102,6 @@ class Addressee:
topic_name: Optional[str],
realm: Optional[Realm] = None,
) -> "Addressee":
# For legacy reason message_to used to be either a list of
# emails or a list of streams. We haven't fixed all of our
# callers yet.

@@ -27,7 +27,7 @@ def alert_words_in_realm(realm: Realm) -> Dict[int, List[str]]:
def get_alert_word_automaton(realm: Realm) -> ahocorasick.Automaton:
user_id_with_words = alert_words_in_realm(realm)
alert_word_automaton = ahocorasick.Automaton()
-for (user_id, alert_words) in user_id_with_words.items():
+for user_id, alert_words in user_id_with_words.items():
for alert_word in alert_words:
alert_word_lower = alert_word.lower()
if alert_word_automaton.exists(alert_word_lower):

@@ -17,7 +17,6 @@ from zerver.models import UserProfile
def avatar_url(
user_profile: UserProfile, medium: bool = False, client_gravatar: bool = False
) -> Optional[str]:
return get_avatar_field(
user_id=user_profile.id,
realm_id=user_profile.realm_id,

@@ -17,7 +17,6 @@ def gravatar_hash(email: str) -> str:
def user_avatar_hash(uid: str) -> str:
# WARNING: If this method is changed, you may need to do a migration
# similar to zerver/migrations/0060_move_avatars_to_be_uid_based.py .
@@ -29,7 +28,6 @@ def user_avatar_hash(uid: str) -> str:
def user_avatar_path(user_profile: UserProfile) -> str:
# WARNING: If this method is changed, you may need to do a migration
# similar to zerver/migrations/0060_move_avatars_to_be_uid_based.py .
return user_avatar_path_from_ids(user_profile.id, user_profile.realm_id)

@@ -24,7 +24,6 @@ from zerver.models import UserProfile, get_active_user
def get_bot_handler(service_name: str) -> Any:
# Check that this service is present in EMBEDDED_BOTS, add exception handling.
configured_service = ""
for embedded_bot_service in EMBEDDED_BOTS:

@@ -38,7 +38,7 @@ def bulk_create_users(
# Now create user_profiles
profiles_to_create: List[UserProfile] = []
-for (email, full_name, active) in users:
+for email, full_name, active in users:
profile = create_user_profile(
realm,
email,

@@ -346,6 +346,7 @@ CacheItemT = TypeVar("CacheItemT")
# serializable objects, will be the object; if encoded, bytes.
CompressedItemT = TypeVar("CompressedItemT")
# Required arguments are as follows:
# * object_ids: The list of object ids to look up
# * cache_key_function: object_id => cache key
@@ -382,7 +383,7 @@ def generic_bulk_cached_fetch(
)
cached_objects: Dict[str, CacheItemT] = {}
-for (key, val) in cached_objects_compressed.items():
+for key, val in cached_objects_compressed.items():
cached_objects[key] = extractor(cached_objects_compressed[key][0])
needed_ids = [
object_id for object_id in object_ids if cache_keys[object_id] not in cached_objects

@@ -28,6 +28,7 @@ from typing import Any, Dict, List, Optional
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Some DER encoding stuff. Bleh. This is because the ccache contains a
# DER-encoded krb5 Ticket structure, whereas Webathena deserializes
# into the various fields. Re-encoding in the client would be easy as

@@ -243,7 +243,6 @@ def event_dict_type(
required_keys: Sequence[Tuple[str, Any]],
optional_keys: Sequence[Tuple[str, Any]] = [],
) -> DictType:
"""
This is just a tiny wrapper on DictType, but it provides
some minor benefits:

@@ -9,6 +9,7 @@ Query = Union[str, bytes, Composable]
Params = Union[Sequence[object], Mapping[str, object], None]
ParamsT = TypeVar("ParamsT")
# Similar to the tracking done in Django's CursorDebugWrapper, but done at the
# psycopg2 cursor level so it works with SQLAlchemy.
def wrapper_execute(

@@ -15,6 +15,7 @@ from django.utils.timezone import now as timezone_now
logger = logging.getLogger("zulip.debug")
# Interactive debugging code from
# https://stackoverflow.com/questions/132058/showing-the-stack-trace-from-a-running-python-application
# (that link also points to code for an interactive remote debugger

@@ -94,7 +94,6 @@ def validate_email_is_valid(
email: str,
validate_email_allowed_in_realm: Callable[[str], None],
) -> Optional[str]:
try:
validators.validate_email(email)
except ValidationError:

@@ -72,7 +72,7 @@ Deployed version: {version}
more_info = report["more_info"]
if more_info is not None:
body += "\nAdditional information:"
-for (key, value) in more_info.items():
+for key, value in more_info.items():
body += f"\n {key}: {value}"
body += "\n\nLog:\n{log}".format(**report)

@@ -763,7 +763,7 @@ def apply_event(
if "profile_data" not in user_dict:
continue
profile_data = user_dict["profile_data"]
-for (field_id, field_data) in list(profile_data.items()):
+for field_id, field_data in list(profile_data.items()):
if int(field_id) not in custom_profile_field_ids:
del profile_data[field_id]
elif event["type"] == "realm_user":

@@ -1083,7 +1083,6 @@ def fetch_reaction_data(response: TableData, message_ids: Set[int]) -> None:
def custom_fetch_huddle_objects(response: TableData, context: Context) -> None:
realm = context["realm"]
user_profile_ids = {r["id"] for r in response["zerver_userprofile"]}
@@ -1343,7 +1342,6 @@ def write_message_partials(
output_dir: Path,
user_profile_ids: Set[int],
) -> None:
dump_file_id = 1
for message_id_chunk in message_id_chunks:
@@ -1644,7 +1642,6 @@ def export_files_from_s3(
def export_uploads_from_local(
realm: Realm, local_dir: Path, output_dir: Path, attachments: List[Attachment]
) -> None:
count = 0
records = []
for attachment in attachments:
@@ -1686,7 +1683,6 @@ def export_avatars_from_local(
users: List[UserProfile],
handle_system_bots: bool,
) -> None:
count = 0
records = []
@@ -1762,7 +1758,6 @@ def get_emoji_path(realm_emoji: RealmEmoji) -> str:
def export_emoji_from_local(
realm: Realm, local_dir: Path, output_dir: Path, realm_emojis: List[RealmEmoji]
) -> None:
count = 0
records = []
for realm_emoji in realm_emojis:
@@ -1995,7 +1990,6 @@ def do_export_user(user_profile: UserProfile, output_dir: Path) -> None:
def export_single_user(user_profile: UserProfile, response: TableData) -> None:
config = get_single_user_config()
export_from_config(
response=response,

@@ -40,7 +40,6 @@ def get_timing(message: str, f: Callable[[], None]) -> None:
def fix_unsubscribed(cursor: CursorWrapper, user_profile: UserProfile) -> None:
recipient_ids = []
def find_recipients() -> None:

@@ -57,7 +57,6 @@ def generate_topics(num_topics: int) -> List[str]:
def load_generators(config: Dict[str, Any]) -> Dict[str, Any]:
results = {}
cfg = config["gen_fodder"]
@@ -81,7 +80,6 @@ def load_generators(config: Dict[str, Any]) -> Dict[str, Any]:
def parse_file(config: Dict[str, Any], gens: Dict[str, Any], corpus_file: str) -> List[str]:
# First, load the entire file into a dictionary,
# then apply our custom filters to it as needed.
@@ -96,7 +94,6 @@ def parse_file(config: Dict[str, Any], gens: Dict[str, Any], corpus_file: str) -
def get_flair_gen(length: int) -> List[str]:
# Grab the percentages from the config file
# create a list that we can consume that will guarantee the distribution
result = []
@@ -111,7 +108,6 @@ def get_flair_gen(length: int) -> List[str]:
def add_flair(paragraphs: List[str], gens: Dict[str, Any]) -> List[str]:
# roll the dice and see what kind of flair we should add, if any
results = []
@@ -158,7 +154,6 @@ def add_flair(paragraphs: List[str], gens: Dict[str, Any]) -> List[str]:
def add_md(mode: str, text: str) -> str:
# mode means: bold, italic, etc.
# to add a list at the end of a paragraph, * item one\n * item two
@@ -174,7 +169,6 @@ def add_md(mode: str, text: str) -> str:
def add_emoji(text: str, emoji: str) -> str:
vals = text.split()
start = random.randrange(len(vals))
@@ -183,7 +177,6 @@ def add_emoji(text: str, emoji: str) -> str:
def add_link(text: str, link: str) -> str:
vals = text.split()
start = random.randrange(len(vals))
@@ -193,7 +186,6 @@ def add_link(text: str, link: str) -> str:
def remove_line_breaks(fh: Any) -> List[str]:
# We're going to remove line breaks from paragraphs
results = [] # save the dialogs as tuples with (author, dialog)
@@ -215,13 +207,11 @@ def remove_line_breaks(fh: Any) -> List[str]:
def write_file(paragraphs: List[str], filename: str) -> None:
with open(filename, "wb") as outfile:
outfile.write(orjson.dumps(paragraphs))
def create_test_data() -> None:
gens = load_generators(config) # returns a dictionary of generators
paragraphs = parse_file(config, gens, config["corpus"]["filename"])

@@ -1060,7 +1060,7 @@ def do_import_realm(import_dir: Path, subdomain: str, processes: int = 1) -> Rea
re_map_foreign_keys(data, "zerver_defaultstream", "stream", related_table="stream")
re_map_foreign_keys(data, "zerver_realmemoji", "author", related_table="user_profile")
-for (table, model, related_table) in realm_tables:
+for table, model, related_table in realm_tables:
re_map_foreign_keys(data, table, "realm", related_table="realm")
update_model_ids(model, data, related_table)
bulk_import_model(data, model)
@@ -1513,7 +1513,6 @@ def import_attachments(data: TableData) -> None:
def import_attachments(data: TableData) -> None:
# Clean up the data in zerver_attachment that is not
# relevant to our many-to-many import.
fix_datetime_fields(data, "zerver_attachment")

@@ -22,7 +22,7 @@ def is_integer_string(val: str) -> bool:
def check_config() -> None:
-for (setting_name, default) in settings.REQUIRED_SETTINGS:
+for setting_name, default in settings.REQUIRED_SETTINGS:
# if required setting is the same as default OR is not found in settings,
# throw error to add/set that setting in config
try:
@@ -42,7 +42,6 @@ class CreateUserParameters:
class ZulipBaseCommand(BaseCommand):
# Fix support for multi-line usage
def create_parser(self, prog_name: str, subcommand: str, **kwargs: Any) -> CommandParser:
parser = super().create_parser(prog_name, subcommand, **kwargs)
@@ -143,7 +142,6 @@ server via `ps -ef` or reading bash history. Prefer
return [self.get_user(email, realm) for email in emails]
def get_user(self, email: str, realm: Optional[Realm]) -> UserProfile:
# If a realm is specified, try to find the user there, and
# throw an error if they don't exist.
if realm is not None:

@@ -1089,7 +1089,6 @@ class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
root: Element,
found_url: ResultWithFamily[Tuple[str, Optional[str]]],
) -> LinkInfo:
grandparent = found_url.family.grandparent
parent = found_url.family.parent
ahref_element = found_url.family.child
@@ -1661,7 +1660,6 @@ class ListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
"""
def __init__(self, parser: BlockParser) -> None:
# HACK: Set the tab length to 2 just for the initialization of
# this class, so that bulleted lists (and only bulleted lists)
# work off 2-space indentation.
@@ -1997,7 +1995,6 @@ def possible_linked_stream_names(content: str) -> Set[str]:
class AlertWordNotificationProcessor(markdown.preprocessors.Preprocessor):
allowed_before_punctuation = {" ", "\n", "(", '"', ".", ",", "'", ";", "[", "*", "`", ">"}
allowed_after_punctuation = {
" ",
@@ -2586,7 +2583,6 @@ def do_convert(
# Pre-fetch data from the DB that is used in the Markdown thread
if message_realm is not None:
# Here we fetch the data structures needed to render
# mentions/stream mentions from the database, but only
# if there is syntax in the message that might use them, since

@@ -196,7 +196,6 @@ class APIArgumentsTablePreprocessor(Preprocessor):
object_values = schema.get("properties", {})
for value in object_values:
description = ""
if "description" in object_values[value]:
description = object_values[value]["description"]

@@ -8,7 +8,6 @@ class DiffError(Exception):
def diff_strings(output: str, expected_output: str) -> str:
mdiff_path = "frontend_tests/zjsunit/mdiff.js"
if not os.path.isfile(mdiff_path): # nocoverage
msg = "Cannot find mdiff for Markdown diff rendering"

@@ -206,7 +206,6 @@ def messages_for_ids(
client_gravatar: bool,
allow_edit_history: bool,
) -> List[Dict[str, Any]]:
cache_transformer = MessageDict.build_dict_from_raw_db_row
id_fetcher = lambda row: row["id"]
@@ -522,7 +521,6 @@ class MessageDict:
reactions: List[RawReactionRow],
submessages: List[Dict[str, Any]],
) -> Dict[str, Any]:
obj = dict(
id=message_id,
sender_id=sender_id,
@@ -588,7 +586,6 @@ class MessageDict:
@staticmethod
def bulk_hydrate_sender_info(objs: List[Dict[str, Any]]) -> None:
sender_ids = list({obj["sender_id"] for obj in objs})
if not sender_ids:
@@ -1055,7 +1052,6 @@ def get_raw_unread_data(
def extract_unread_data_from_um_rows(
rows: List[Dict[str, Any]], user_profile: Optional[UserProfile]
) -> RawUnreadMessagesResult:
pm_dict: Dict[int, RawUnreadPrivateMessageDict] = {}
stream_dict: Dict[int, RawUnreadStreamDict] = {}
unmuted_stream_msgs: Set[int] = set()
@@ -1234,7 +1230,6 @@ def aggregate_huddles(*, input_dict: Dict[int, RawUnreadHuddleDict]) -> List[Unr
def aggregate_unread_data(raw_data: RawUnreadMessagesResult) -> UnreadMessagesResult:
pm_dict = raw_data["pm_dict"]
stream_dict = raw_data["stream_dict"]
unmuted_stream_msgs = raw_data["unmuted_stream_msgs"]
@@ -1574,7 +1569,7 @@ def get_recent_private_conversations(user_profile: UserProfile) -> Dict[int, Dic
)
# Now we need to map all the recipient_id objects to lists of user IDs
-for (recipient_id, user_profile_id) in (
+for recipient_id, user_profile_id in (
Subscription.objects.filter(recipient_id__in=recipient_map.keys())
.exclude(user_profile_id=user_profile.id)
.values_list("recipient_id", "user_profile_id")

@@ -652,7 +652,6 @@ class NarrowBuilder:
def narrow_parameter(var_name: str, json: str) -> OptionalNarrowListT:
data = orjson.loads(json)
if not isinstance(data, list):
raise ValueError("argument is not a list")
@@ -661,7 +660,6 @@ def narrow_parameter(var_name: str, json: str) -> OptionalNarrowListT:
return None
def convert_term(elem: Union[Dict[str, Any], List[str]]) -> Dict[str, Any]:
# We have to support a legacy tuple format.
if isinstance(elem, list):
if len(elem) != 2 or any(not isinstance(x, str) for x in elem):

@@ -47,7 +47,6 @@ class UserMessageNotificationsData:
muted_sender_user_ids: Set[int],
all_bot_user_ids: Set[int],
) -> "UserMessageNotificationsData":
if user_id in all_bot_user_ids:
# Don't send any notifications to bots
return cls(

@@ -165,7 +165,6 @@ def get_service_interface_class(interface: str) -> Any:
def get_outgoing_webhook_service_handler(service: Service) -> Any:
service_interface_class = get_service_interface_class(service.interface_name())
service_interface = service_interface_class(
token=service.token, user_profile=service.user_profile, service_name=service.name

@@ -13,7 +13,6 @@ from zerver.models import PushDeviceToken, Realm, UserPresence, UserProfile, que
def get_presence_dicts_for_rows(
all_rows: Sequence[Mapping[str, Any]], mobile_user_ids: Set[int], slim_presence: bool
) -> Dict[str, Dict[str, Any]]:
# Note that datetime values have sub-second granularity, which is
# mostly important for avoiding test flakes, but it's also technically
# more precise for real users.
@@ -48,7 +47,6 @@ def get_presence_dicts_for_rows(
def get_modern_user_presence_info(
presence_rows: Sequence[Mapping[str, Any]], mobile_user_ids: Set[int]
) -> Dict[str, Any]:
active_timestamp = None
for row in reversed(presence_rows):
if row["status"] == UserPresence.ACTIVE:
@@ -78,7 +76,6 @@ def get_modern_user_presence_info(
def get_legacy_user_presence_info(
presence_rows: Sequence[Mapping[str, Any]], mobile_user_ids: Set[int]
) -> Dict[str, Any]:
# The format of data here is for legacy users of our API,
# including old versions of the mobile app.
info_rows = []
@@ -190,7 +187,6 @@ def get_presence_dict_by_realm(
def get_presences_for_realm(
realm: Realm, slim_presence: bool
) -> Dict[str, Dict[str, Dict[str, Any]]]:
if realm.presence_disabled:
# Return an empty dict if presence is disabled in this realm
return defaultdict(dict)

@@ -50,6 +50,7 @@ if settings.ZILENCER_ENABLED:
DeviceToken = Union[PushDeviceToken, "RemotePushDeviceToken"]
# We store the token as b64, but apns-client wants hex strings
def b64_to_hex(data: str) -> str:
return base64.b64decode(data).hex()

@@ -23,6 +23,7 @@ MAX_REQUEST_RETRIES = 3
ChannelT = TypeVar("ChannelT", Channel, BlockingChannel)
Consumer = Callable[[ChannelT, Basic.Deliver, pika.BasicProperties, bytes], None]
# This simple queuing library doesn't expose much of the power of
# RabbitMQ/Pika's queuing system; its purpose is to just provide an
# interface for external files to put things into queues and take them

@@ -12,7 +12,6 @@ def get_recipient_from_user_profiles(
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile,
) -> Recipient:
# Avoid mutating the passed in list of recipient_profiles.
recipient_profiles_map = {user_profile.id: user_profile for user_profile in recipient_profiles}
@@ -87,7 +86,6 @@ def recipient_for_user_profiles(
sender: UserProfile,
allow_deactivated: bool = False,
) -> Recipient:
recipient_profiles = validate_recipient_user_profiles(
user_profiles, sender, allow_deactivated=allow_deactivated
)

@@ -349,7 +349,7 @@ def has_request_variables(
view_func_full_name = ".".join([req_func.__module__, req_func.__name__])
-for (name, value) in zip(default_param_names, default_param_values):
+for name, value in zip(default_param_names, default_param_values):
if isinstance(value, _REQ):
value.func_var_name = name
if value.post_var_name is None:

@@ -48,7 +48,7 @@ def filter_by_subscription_history(
user_messages_to_insert.append(user_message)
seen_message_ids.add(message["id"])
-for (stream_id, stream_messages_raw) in all_stream_messages.items():
+for stream_id, stream_messages_raw in all_stream_messages.items():
stream_subscription_logs = all_stream_subscription_logs[stream_id]
# Make a copy of the original list of messages, which we will
# mutate in the loop below.

@@ -105,7 +105,6 @@ def get_bulk_stream_subscriber_info(
users: List[UserProfile],
streams: List[Stream],
) -> Dict[int, List[SubInfo]]:
stream_ids = {stream.id for stream in streams}
subs = Subscription.objects.filter(
@@ -220,7 +219,6 @@ def bulk_get_private_peers(
realm: Realm,
private_streams: List[Stream],
) -> Dict[int, Set[int]]:
if not private_streams:
return {}

@@ -10,7 +10,6 @@ def get_subdomain(request: HttpRequest) -> str:
def get_subdomain(request: HttpRequest) -> str:
# The HTTP spec allows, but doesn't require, a client to omit the
# port in the `Host` header if it's "the default port for the
# service requested", i.e. typically either 443 or 80; and

@@ -75,6 +75,7 @@ docs_without_macros = [
"incoming-webhooks-walkthrough.md",
]
# render_markdown_path is passed a context dictionary (unhashable), which
# results in the calls not being cached. To work around this, we convert the
# dict to a tuple of dict items to cache the results.

@@ -1556,7 +1556,6 @@ Output:
def check_has_permission_policies(
self, policy: str, validation_func: Callable[[UserProfile], bool]
) -> None:
realm = get_realm("zulip")
owner_user = self.example_user("desdemona")
admin_user = self.example_user("iago")

@@ -452,7 +452,6 @@ def write_instrumentation_reports(full_suite: bool, include_webhooks: bool) -> N
return url
def find_pattern(pattern: Any, prefixes: List[str]) -> None:
if isinstance(pattern, type(URLResolver)):
return # nocoverage -- shouldn't actually happen

Some files were not shown because too many files have changed in this diff.