mirror of https://github.com/zulip/zulip.git
python: Modernize legacy Python 2 syntax with pyupgrade.
Generated by `pyupgrade --py3-plus --keep-percent-format` on all our
Python code except `zthumbor` and `zulip-ec2-configure-interfaces`,
followed by manual indentation fixes.

Signed-off-by: Anders Kaseorg <anders@zulipchat.com>
parent fff2d3958a
commit c734bbd95d
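For context, a minimal illustrative sketch (not part of the commit itself; the sample values are hypothetical) of the rewrites `pyupgrade --py3-plus --keep-percent-format` performs throughout the diff below:

```python
# Illustration of the pyupgrade rewrites seen in this commit; each
# "was:" form is the legacy Python 2 idiom that gets replaced.

# set([...]) becomes a set literal; set(genexpr) becomes a comprehension:
pairs = {('user', 'property')}              # was: set([('user', 'property')])
names = {w.strip() for w in ['a ', ' b']}   # was: set(w.strip() for w in [...])

# The redundant 'r' mode argument to open() is dropped:
with open(__file__) as f:                   # was: open(__file__, 'r')
    f.read()

# class Foo(object) becomes class Foo, and IOError becomes OSError
# (an alias since Python 3.3):
class Kandra:                               # was: class Kandra(object):
    pass

# Explicit positional indexes in format strings are auto-numbered,
# and u'' prefixes plus coding cookies are removed:
print('{}.zulipdev.org'.format('alice'))    # was: '{0}.zulipdev.org'
```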
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 import django.db.models.deletion
 from django.conf import settings
 from django.db import migrations, models
@@ -90,22 +89,22 @@ class Migration(migrations.Migration):
         ),
         migrations.AlterUniqueTogether(
             name='usercount',
-            unique_together=set([('user', 'property', 'end_time', 'interval')]),
+            unique_together={('user', 'property', 'end_time', 'interval')},
         ),
         migrations.AlterUniqueTogether(
             name='streamcount',
-            unique_together=set([('stream', 'property', 'end_time', 'interval')]),
+            unique_together={('stream', 'property', 'end_time', 'interval')},
         ),
         migrations.AlterUniqueTogether(
             name='realmcount',
-            unique_together=set([('realm', 'property', 'end_time', 'interval')]),
+            unique_together={('realm', 'property', 'end_time', 'interval')},
         ),
         migrations.AlterUniqueTogether(
             name='installationcount',
-            unique_together=set([('property', 'end_time', 'interval')]),
+            unique_together={('property', 'end_time', 'interval')},
         ),
         migrations.AlterUniqueTogether(
             name='huddlecount',
-            unique_together=set([('huddle', 'property', 'end_time', 'interval')]),
+            unique_together={('huddle', 'property', 'end_time', 'interval')},
         ),
     ]
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.db import migrations
 
 
@@ -11,7 +10,7 @@ class Migration(migrations.Migration):
     operations = [
         migrations.AlterUniqueTogether(
             name='huddlecount',
-            unique_together=set([]),
+            unique_together=set(),
         ),
         migrations.RemoveField(
             model_name='huddlecount',
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.db import migrations, models
 
 
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.db import migrations, models
 
 
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.db import migrations, models
 
 
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.db import migrations
 
 
@@ -11,18 +10,18 @@ class Migration(migrations.Migration):
     operations = [
         migrations.AlterUniqueTogether(
             name='installationcount',
-            unique_together=set([('property', 'subgroup', 'end_time', 'interval')]),
+            unique_together={('property', 'subgroup', 'end_time', 'interval')},
         ),
         migrations.AlterUniqueTogether(
             name='realmcount',
-            unique_together=set([('realm', 'property', 'subgroup', 'end_time', 'interval')]),
+            unique_together={('realm', 'property', 'subgroup', 'end_time', 'interval')},
         ),
         migrations.AlterUniqueTogether(
             name='streamcount',
-            unique_together=set([('stream', 'property', 'subgroup', 'end_time', 'interval')]),
+            unique_together={('stream', 'property', 'subgroup', 'end_time', 'interval')},
         ),
         migrations.AlterUniqueTogether(
             name='usercount',
-            unique_together=set([('user', 'property', 'subgroup', 'end_time', 'interval')]),
+            unique_together={('user', 'property', 'subgroup', 'end_time', 'interval')},
         ),
     ]
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.10.4 on 2017-01-16 20:50
 from django.db import migrations
 
@@ -12,7 +11,7 @@ class Migration(migrations.Migration):
     operations = [
         migrations.AlterUniqueTogether(
             name='installationcount',
-            unique_together=set([('property', 'subgroup', 'end_time')]),
+            unique_together={('property', 'subgroup', 'end_time')},
         ),
         migrations.RemoveField(
             model_name='installationcount',
@@ -20,7 +19,7 @@ class Migration(migrations.Migration):
         ),
         migrations.AlterUniqueTogether(
             name='realmcount',
-            unique_together=set([('realm', 'property', 'subgroup', 'end_time')]),
+            unique_together={('realm', 'property', 'subgroup', 'end_time')},
         ),
         migrations.RemoveField(
             model_name='realmcount',
@@ -28,7 +27,7 @@ class Migration(migrations.Migration):
         ),
         migrations.AlterUniqueTogether(
             name='streamcount',
-            unique_together=set([('stream', 'property', 'subgroup', 'end_time')]),
+            unique_together={('stream', 'property', 'subgroup', 'end_time')},
         ),
         migrations.RemoveField(
             model_name='streamcount',
@@ -36,7 +35,7 @@ class Migration(migrations.Migration):
         ),
         migrations.AlterUniqueTogether(
             name='usercount',
-            unique_together=set([('user', 'property', 'subgroup', 'end_time')]),
+            unique_together={('user', 'property', 'subgroup', 'end_time')},
         ),
         migrations.RemoveField(
             model_name='usercount',
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.10.5 on 2017-02-01 22:28
 from django.db import migrations
 
@@ -13,14 +12,14 @@ class Migration(migrations.Migration):
     operations = [
         migrations.AlterIndexTogether(
             name='realmcount',
-            index_together=set([('property', 'end_time')]),
+            index_together={('property', 'end_time')},
         ),
         migrations.AlterIndexTogether(
             name='streamcount',
-            index_together=set([('property', 'realm', 'end_time')]),
+            index_together={('property', 'realm', 'end_time')},
         ),
         migrations.AlterIndexTogether(
             name='usercount',
-            index_together=set([('property', 'realm', 'end_time')]),
+            index_together={('property', 'realm', 'end_time')},
         ),
     ]
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.db import migrations
 from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
 from django.db.migrations.state import StateApps
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.db import migrations
 from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
 from django.db.migrations.state import StateApps
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.db import migrations
 from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
 from django.db.migrations.state import StateApps
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.6 on 2018-01-29 08:14
-from __future__ import unicode_literals
 
 import django.db.models.deletion
 from django.db import migrations, models
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.18 on 2019-02-02 02:47
-from __future__ import unicode_literals
 
 from django.db import migrations
 
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.26 on 2020-01-27 04:32
-from __future__ import unicode_literals
 
 from django.db import migrations
 
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.db import migrations
 from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
 from django.db.migrations.state import StateApps
@@ -261,7 +261,7 @@ def get_chart_data(request: HttpRequest, user_profile: UserProfile, chart_name:
                         "analytics cron job running?" % (realm.string_id, start, end))
         raise JsonableError(_("No analytics data available. Please contact your server administrator."))
 
-    assert len(set([stat.frequency for stat in stats])) == 1
+    assert len({stat.frequency for stat in stats}) == 1
     end_times = time_range(start, end, stats[0].frequency, min_length)
     data = {'end_times': end_times, 'frequency': stats[0].frequency}  # type: Dict[str, Any]
 
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
 # Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
 
 # Permission is hereby granted, free of charge, to any person obtaining a
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.db import models, migrations
 import django.db.models.deletion
 
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.db import models, migrations
 import django.utils.timezone
 
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.10.4 on 2017-01-17 09:16
 from django.db import migrations
 
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.2 on 2017-07-08 04:23
 from django.db import migrations, models
 
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.6 on 2017-11-30 00:13
-from __future__ import unicode_literals
 
 from django.db import migrations, models
 import django.db.models.deletion
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.6 on 2018-01-29 18:39
-from __future__ import unicode_literals
 
 from django.db import migrations, models
 
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
 # Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
 
 __revision__ = '$Id: models.py 28 2009-10-22 15:03:02Z jarek.zgoda $'
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
 # Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
 
 __revision__ = '$Id: settings.py 12 2008-11-23 19:38:52Z jarek.zgoda $'
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.14 on 2018-09-25 12:02
-from __future__ import unicode_literals
 
 import django.db.models.deletion
 from django.db import migrations, models
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.16 on 2018-12-12 20:19
-from __future__ import unicode_literals
 
 from django.db import migrations, models
 
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.16 on 2018-12-22 21:05
-from __future__ import unicode_literals
 
 import django.db.models.deletion
 from django.db import migrations, models
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.18 on 2019-01-19 05:01
-from __future__ import unicode_literals
 
 import django.db.models.deletion
 from django.db import migrations, models
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.18 on 2019-01-28 13:04
-from __future__ import unicode_literals
 
 import django.db.models.deletion
 from django.db import migrations, models
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.18 on 2019-01-29 01:46
-from __future__ import unicode_literals
 
 from django.db import migrations, models
 
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.18 on 2019-01-31 22:16
-from __future__ import unicode_literals
 
 from django.db import migrations
 
@@ -1,6 +1,4 @@
-# -*- coding: utf-8 -*-
 # Generated by Django 1.11.20 on 2019-04-11 00:45
-from __future__ import unicode_literals
 
 from django.db import migrations, models
 
@@ -105,7 +105,7 @@ def read_stripe_fixture(decorated_function_name: str,
     def _read_stripe_fixture(*args: Any, **kwargs: Any) -> Any:
         mock = operator.attrgetter(mocked_function_name)(sys.modules[__name__])
         fixture_path = stripe_fixture_path(decorated_function_name, mocked_function_name, mock.call_count)
-        fixture = ujson.load(open(fixture_path, 'r'))
+        fixture = ujson.load(open(fixture_path))
         # Check for StripeError fixtures
         if "json_body" in fixture:
             requestor = stripe.api_requestor.APIRequestor()
@@ -151,7 +151,7 @@ def normalize_fixture_data(decorated_function: CallableT,
     normalized_values = {pattern: {}
                          for pattern in pattern_translations.keys()}  # type: Dict[str, Dict[str, str]]
     for fixture_file in fixture_files_for_function(decorated_function):
-        with open(fixture_file, "r") as f:
+        with open(fixture_file) as f:
             file_content = f.read()
         for pattern, translation in pattern_translations.items():
             for match in re.findall(pattern, file_content):
@@ -214,7 +214,7 @@ def mock_stripe(tested_timestamp_fields: List[str]=[],
 
 # A Kandra is a fictional character that can become anything. Used as a
 # wildcard when testing for equality.
-class Kandra(object):  # nocoverage: TODO
+class Kandra:  # nocoverage: TODO
     def __eq__(self, other: Any) -> bool:
         return True
 
@@ -288,7 +288,7 @@ class StripeTestCase(ZulipTestCase):
 
     # Upgrade without talking to Stripe
     def local_upgrade(self, *args: Any) -> None:
-        class StripeMock(object):
+        class StripeMock:
             def __init__(self, depth: int=1):
                 self.id = 'id'
                 self.created = '1000'
@@ -1045,8 +1045,8 @@ class RequiresBillingAccessTest(ZulipTestCase):
         # Make sure that we are testing all the JSON endpoints
         # Quite a hack, but probably fine for now
         string_with_all_endpoints = str(get_resolver('corporate.urls').reverse_dict)
-        json_endpoints = set([word.strip("\"'()[],$") for word in string_with_all_endpoints.split()
-                              if 'json' in word])
+        json_endpoints = {word.strip("\"'()[],$") for word in string_with_all_endpoints.split()
+                          if 'json' in word}
         # No need to test upgrade endpoint as it only requires user to be logged in.
         json_endpoints.remove("json/billing/upgrade")
 
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 #
 # zulip-contributor-docs documentation build configuration file, created by
 # sphinx-quickstart on Mon Aug 17 16:24:04 2015.
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import (print_function)
 import os
 import sys
 import configparser
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.conf import settings
 from django.db import migrations
 
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.db import connection, migrations
 from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
 from django.db.migrations.state import StateApps
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 from django.conf import settings
 from django.db import migrations
 
@@ -15,7 +15,7 @@ def nagios_from_file(results_file: str, max_time_diff: int=60 * 2) -> 'Tuple[int
     This file is created by various nagios checking cron jobs such as
     check-rabbitmq-queues and check-rabbitmq-consumers"""
 
-    with open(results_file, 'r') as f:
+    with open(results_file) as f:
         data = f.read().strip()
     pieces = data.split('|')
 
@@ -113,7 +113,7 @@ def get_zulips():
         last_event_id = max(last_event_id, int(event['id']))
     # If we get a heartbeat event, that means we've been hanging for
     # 40s, and we should bail.
-    if 'heartbeat' in set(event['type'] for event in res['events']):
+    if 'heartbeat' in {event['type'] for event in res['events']}:
         report("CRITICAL", msg="Got heartbeat waiting for Zulip, which means get_events is hanging")
     return [event['message'] for event in res['events']]
 
@@ -24,9 +24,9 @@ if subprocess.check_output(['psql', '-v', 'ON_ERROR_STOP=1',
     report('OK', 'this is not the primary')
 
 try:
-    with open('/var/lib/nagios_state/last_postgres_backup', 'r') as f:
+    with open('/var/lib/nagios_state/last_postgres_backup') as f:
         last_backup = dateutil.parser.parse(f.read())
-except IOError:
+except OSError:
     report('UNKNOWN', 'could not determine completion time of last Postgres backup')
 
 if datetime.now(tz=pytz.utc) - last_backup > timedelta(hours=25):
@@ -31,7 +31,7 @@ down_count = 0
 for results_file_name in os.listdir(RESULTS_DIR):
     this_state = "OK"
     results_file = os.path.join(RESULTS_DIR, results_file_name)
-    with open(results_file, 'r') as f:
+    with open(results_file) as f:
         data = f.read().strip()
     last_check = os.stat(results_file).st_mtime
     time_since_last_check = time.time() - last_check
@@ -29,7 +29,7 @@ def report(state, data, last_check):
                          data))
     exit(states[state])
 
-with open(RESULTS_FILE, 'r') as f:
+with open(RESULTS_FILE) as f:
     data = f.read().strip()
 if data.split("\n")[-1].strip() == "0":
     state = "OK"
@@ -18,7 +18,7 @@ if ENV == "travis":
 
 def get_caches_in_use(threshold_days):
     # type: (int) -> Set[str]
-    setups_to_check = set([ZULIP_PATH, ])
+    setups_to_check = {ZULIP_PATH}
     caches_in_use = set()
 
     if ENV == "prod":
@@ -25,7 +25,7 @@ if ENV == "travis":
 
 def get_caches_in_use(threshold_days):
     # type: (int) -> Set[str]
-    setups_to_check = set([ZULIP_PATH, ])
+    setups_to_check = {ZULIP_PATH}
     caches_in_use = set()
 
     if ENV == "prod":
@@ -20,7 +20,7 @@ if ENV == "travis":
 
 def get_caches_in_use(threshold_days):
     # type: (int) -> Set[str]
-    setups_to_check = set([ZULIP_PATH, ])
+    setups_to_check = {ZULIP_PATH}
     caches_in_use = set()
 
     def add_current_venv_cache(venv_name: str) -> None:
@@ -39,7 +39,7 @@ def generate_sha1sum_node_modules(setup_dir=None, production=DEFAULT_PRODUCTION)
     if os.path.exists(YARN_LOCK_FILE_PATH):
         # For backwards compatibility, we can't assume yarn.lock exists
         sha1sum.update(subprocess_text_output(['cat', YARN_LOCK_FILE_PATH]).encode('utf8'))
-    with open(YARN_PACKAGE_JSON, "r") as f:
+    with open(YARN_PACKAGE_JSON) as f:
         yarn_version = json.load(f)['version']
     sha1sum.update(yarn_version.encode("utf8"))
     sha1sum.update(subprocess_text_output(['node', '--version']).encode('utf8'))
@@ -171,7 +171,7 @@ def get_venv_packages(venv_path):
     package index file.
     """
     with open(get_index_filename(venv_path)) as reader:
-        return set(p.strip() for p in reader.read().split('\n') if p.strip())
+        return {p.strip() for p in reader.read().split('\n') if p.strip()}
 
 def try_to_copy_venv(venv_path, new_packages):
     # type: (str, Set[str]) -> bool
@@ -281,7 +281,7 @@ def do_patch_activate_script(venv_path):
     # venv_path should be what we want to have in VIRTUAL_ENV after patching
     script_path = os.path.join(venv_path, "bin", "activate")
 
-    with open(script_path, 'r') as f:
+    with open(script_path) as f:
         lines = f.readlines()
     for i, line in enumerate(lines):
         if line.startswith('VIRTUAL_ENV='):
@@ -65,7 +65,7 @@ if not args.skip_puppet:
         subprocess.check_call(["apt-get", "update"])
         subprocess.check_call(["apt-get", "-y", "upgrade"])
 
-if not os.path.exists((os.path.join(deploy_path, "zproject/prod_settings.py"))):
+if not os.path.exists(os.path.join(deploy_path, "zproject/prod_settings.py")):
     # This is normally done in unpack-zulip, but for upgrading from
     # zulip<1.4.0, we need to do it. See discussion in commit 586b23637.
     os.symlink("/etc/zulip/settings.py",
@@ -289,7 +289,7 @@ def get_caches_to_be_purged(caches_dir, caches_in_use, threshold_days):
 
 def purge_unused_caches(caches_dir, caches_in_use, cache_type, args):
     # type: (str, Set[str], str, argparse.Namespace) -> None
-    all_caches = set([os.path.join(caches_dir, cache) for cache in os.listdir(caches_dir)])
+    all_caches = {os.path.join(caches_dir, cache) for cache in os.listdir(caches_dir)}
    caches_to_purge = get_caches_to_be_purged(caches_dir, caches_in_use, args.threshold_days)
     caches_to_keep = all_caches - caches_to_purge
 
@@ -313,7 +313,7 @@ def generate_sha1sum_emoji(zulip_path):
     # Take into account the version of `emoji-datasource-google` package
     # while generating success stamp.
     PACKAGE_FILE_PATH = os.path.join(zulip_path, 'package.json')
-    with open(PACKAGE_FILE_PATH, 'r') as fp:
+    with open(PACKAGE_FILE_PATH) as fp:
         parsed_package_file = json.load(fp)
     dependency_data = parsed_package_file['dependencies']
 
@@ -366,7 +366,7 @@ def parse_os_release():
     we avoid using it, as it is not available on RHEL-based platforms.
     """
     distro_info = {}  # type: Dict[str, str]
-    with open('/etc/os-release', 'r') as fp:
+    with open('/etc/os-release') as fp:
         for line in fp:
             line = line.strip()
             if not line or line.startswith('#'):
@@ -36,8 +36,8 @@ def parse_args():
 
 def get_deployments_to_be_purged(recent_deployments):
     # type: (Set[str]) -> Set[str]
-    all_deployments = set([os.path.join(DEPLOYMENTS_DIR, deployment)
-                           for deployment in os.listdir(DEPLOYMENTS_DIR)])
+    all_deployments = {os.path.join(DEPLOYMENTS_DIR, deployment)
+                       for deployment in os.listdir(DEPLOYMENTS_DIR)}
     deployments_to_purge = set()
     for deployment in all_deployments:
         # Deployments whose name is not in the format of a timestamp are
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
 # Scrapy settings for documentation_crawler project
 #
 # For simplicity, this file contains only settings considered important or
@@ -41,8 +41,8 @@ def get_config():
 
 def user_exists(username):
     # type: (str) -> bool
-    print("Checking to see if GitHub user {0} exists...".format(username))
-    user_api_url = "https://api.github.com/users/{0}".format(username)
+    print("Checking to see if GitHub user {} exists...".format(username))
+    user_api_url = "https://api.github.com/users/{}".format(username)
     try:
         response = urllib.request.urlopen(user_api_url)
         json.load(response)
@@ -50,30 +50,30 @@ def user_exists(username):
         return True
     except urllib.error.HTTPError as err:
         print(err)
-        print("Does the github user {0} exist?".format(username))
+        print("Does the github user {} exist?".format(username))
         sys.exit(1)
 
 def get_keys(username):
     # type: (str) -> List[Dict[str, Any]]
     print("Checking to see that GitHub user has available public keys...")
-    apiurl_keys = "https://api.github.com/users/{0}/keys".format(username)
+    apiurl_keys = "https://api.github.com/users/{}/keys".format(username)
     try:
         response = urllib.request.urlopen(apiurl_keys)
         userkeys = json.load(response)
         if not userkeys:
-            print("No keys found. Has user {0} added ssh keys to their github account?".format(username))
+            print("No keys found. Has user {} added ssh keys to their github account?".format(username))
             sys.exit(1)
         print("...public keys found!")
         return userkeys
     except urllib.error.HTTPError as err:
         print(err)
-        print("Has user {0} added ssh keys to their github account?".format(username))
+        print("Has user {} added ssh keys to their github account?".format(username))
         sys.exit(1)
 
 def fork_exists(username):
     # type: (str) -> bool
     print("Checking to see GitHub user has forked zulip/zulip...")
-    apiurl_fork = "https://api.github.com/repos/{0}/zulip".format(username)
+    apiurl_fork = "https://api.github.com/repos/{}/zulip".format(username)
     try:
         response = urllib.request.urlopen(apiurl_fork)
         json.load(response)
@@ -81,21 +81,21 @@ def fork_exists(username):
         return True
     except urllib.error.HTTPError as err:
         print(err)
-        print("Has user {0} forked zulip/zulip?".format(username))
+        print("Has user {} forked zulip/zulip?".format(username))
         sys.exit(1)
 
 def exit_if_droplet_exists(my_token: str, username: str, recreate: bool) -> None:
-    print("Checking to see if droplet for {0} already exists...".format(username))
+    print("Checking to see if droplet for {} already exists...".format(username))
     manager = digitalocean.Manager(token=my_token)
     my_droplets = manager.get_all_droplets()
     for droplet in my_droplets:
-        if droplet.name == "{0}.zulipdev.org".format(username):
+        if droplet.name == "{}.zulipdev.org".format(username):
             if not recreate:
-                print("Droplet for user {0} already exists. Pass --recreate if you "
+                print("Droplet for user {} already exists. Pass --recreate if you "
                       "need to recreate the droplet.".format(username))
                 sys.exit(1)
             else:
-                print("Deleting existing droplet for {0}.".format(username))
+                print("Deleting existing droplet for {}.".format(username))
                 droplet.destroy()
                 return
     print("...No droplet found...proceeding.")
@@ -108,7 +108,7 @@ def set_user_data(username, userkeys):
     # spaces here are important here - these need to be properly indented under
     # ssh_authorized_keys:
     for key in userkeys:
-        ssh_authorized_keys += "\n - {0}".format(key['key'])
+        ssh_authorized_keys += "\n - {}".format(key['key'])
     # print(ssh_authorized_keys)
 
     setup_repo = """\
@@ -121,11 +121,11 @@ cd /home/zulipdev/{1} && git remote add origin https://github.com/{0}/{1}.git &&
 #cloud-config
 users:
   - name: zulipdev
-    ssh_authorized_keys:{0}
+    ssh_authorized_keys:{}
 runcmd:
-  - su -c '{1}' zulipdev
+  - su -c '{}' zulipdev
   - su -c 'git clean -f' zulipdev
-  - su -c '{2}' zulipdev
+  - su -c '{}' zulipdev
   - su -c 'git clean -f' zulipdev
   - su -c 'git config --global core.editor nano' zulipdev
   - su -c 'git config --global pull.rebase true' zulipdev
@@ -141,7 +141,7 @@ def create_droplet(my_token, template_id, username, tags, user_data):
     # type: (str, str, str, List[str], str) -> str
     droplet = digitalocean.Droplet(
         token=my_token,
-        name='{0}.zulipdev.org'.format(username),
+        name='{}.zulipdev.org'.format(username),
         region='nyc3',
         image=template_id,
         size_slug='2gb',
@@ -157,7 +157,7 @@ def create_droplet(my_token, template_id, username, tags, user_data):
         actions = droplet.get_actions()
         for action in actions:
             action.load()
-            print("...[{0}]: {1}".format(action.type, action.status))
+            print("...[{}]: {}".format(action.type, action.status))
             if action.type == 'create' and action.status == 'completed':
                 incomplete = False
                 break
@@ -165,7 +165,7 @@ def create_droplet(my_token, template_id, username, tags, user_data):
         time.sleep(15)
     print("...droplet created!")
     droplet.load()
-    print("...ip address for new droplet is: {0}.".format(droplet.ip_address))
+    print("...ip address for new droplet is: {}.".format(droplet.ip_address))
     return droplet.ip_address
 
 def delete_existing_records(records: List[digitalocean.Record], record_name: str) -> None:
@@ -175,7 +175,7 @@ def delete_existing_records(records: List[digitalocean.Record], record_name: str
             record.destroy()
             count = count + 1
     if count:
-        print("Deleted {0} existing A records for {1}.zulipdev.org.".format(count, record_name))
+        print("Deleted {} existing A records for {}.zulipdev.org.".format(count, record_name))
 
 def create_dns_record(my_token, username, ip_address):
     # type: (str, str, str) -> None
@@ -187,9 +187,9 @@ def create_dns_record(my_token, username, ip_address):
     wildcard_name = "*." + username
     delete_existing_records(records, wildcard_name)
 
-    print("Creating new A record for {0}.zulipdev.org that points to {1}.".format(username, ip_address))
+    print("Creating new A record for {}.zulipdev.org that points to {}.".format(username, ip_address))
     domain.create_new_domain_record(type='A', name=username, data=ip_address)
-    print("Creating new A record for *.{0}.zulipdev.org that points to {1}.".format(username, ip_address))
+    print("Creating new A record for *.{}.zulipdev.org that points to {}.".format(username, ip_address))
     domain.create_new_domain_record(type='A', name=wildcard_name, data=ip_address)
 
 def print_completion(username):
@@ -231,7 +231,7 @@ if __name__ == '__main__':
 
     # get command line arguments
     args = parser.parse_args()
-    print("Creating Zulip developer environment for GitHub user {0}...".format(args.username))
+    print("Creating Zulip developer environment for GitHub user {}...".format(args.username))
 
     # get config details
     config = get_config()
@@ -54,7 +54,7 @@ def write_to_disk(json_data: ContributorsJSON, out_file: str) -> None:
     with open(out_file, 'w') as f:
         try:
             f.write("{}\n".format(json.dumps(json_data, indent=2, sort_keys=True)))
-        except IOError as e:
+        except OSError as e:
             logger.warning(e)
             sys.exit(1)
 
@@ -126,7 +126,7 @@ def update_contributor_data_file() -> None:
 
     # remove duplicate contributions count
     # find commits at the time of split and subtract from zulip-server
-    with open(duplicate_commits_file, 'r') as f:
+    with open(duplicate_commits_file) as f:
         duplicate_commits = json.load(f)
         for committer in duplicate_commits:
             if committer in contribs_list and contribs_list[committer].get('server'):
@@ -15,7 +15,7 @@ def debug(obj):
 
 def parse_file(fn):
     # type: (str) -> Dict[str, Any]
-    with open(fn, 'r') as f:
+    with open(fn) as f:
         text = f.read()
     tags = re.findall(r'{+\s*(.*?)\s*}+', text)
     root = {}  # type: Dict[str, Any]
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import absolute_import
-from __future__ import print_function
 
 import configparser
 from hashlib import md5
@@ -159,7 +159,7 @@ class TitleMatchRegexAllowException(LineRule):
         regex = self.options['regex'].value
         pattern = re.compile(regex, re.UNICODE)
         if not pattern.search(title) and not title.startswith("Revert \""):
-            violation_msg = u"Title does not match regex ({0})".format(regex)
+            violation_msg = "Title does not match regex ({})".format(regex)
             return [RuleViolation(self.id, violation_msg, title)]
 
         return []
@@ -119,7 +119,7 @@ def make_dot_file(graph):
 
 def test():
     # type: () -> None
-    graph = Graph(set([
+    graph = Graph({
         ('x', 'a'),
         ('a', 'b'),
         ('b', 'c'),
@@ -128,7 +128,7 @@ def test():
         ('d', 'e'),
         ('e', 'f'),
         ('e', 'g'),
-    ]))
+    })
     graph.remove_exterior_nodes()
 
     s = make_dot_file(graph)
@@ -193,7 +193,7 @@ def build_id_dict(templates):
     template_id_dict = defaultdict(list)  # type: (Dict[str, List[str]])
 
     for fn in templates:
-        with open(fn, 'r') as f:
+        with open(fn) as f:
             text = f.read()
         list_tags = tokenize(text)
 
@@ -7,7 +7,7 @@ def show_all_branches(fns):
     # type: (List[str]) -> None
     for fn in fns:
         print(fn)
-        with open(fn, 'r') as f:
+        with open(fn) as f:
             text = f.read()
         branches = html_branches(text, fn=fn)
         for branch in branches:
@@ -26,7 +26,7 @@ class Grepper:
         all_branches = []  # type: List[HtmlTreeBranch]
 
         for fn in fns:
-            with open(fn, 'r') as f:
+            with open(fn) as f:
                 text = f.read()
             branches = html_branches(text, fn=fn)
             all_branches += branches
@@ -193,7 +193,7 @@ def pretty_print_html(html, num_spaces=4):
 
 def validate_indent_html(fn, fix):
     # type: (str, bool) -> int
-    with open(fn, 'r') as f:
+    with open(fn) as f:
         html = f.read()
     phtml = pretty_print_html(html)
     if not html.split('\n') == phtml.split('\n'):
@@ -41,7 +41,7 @@ def setup_shell_profile(shell_profile):
     def write_command(command):
         # type: (str) -> None
         if os.path.exists(shell_profile_path):
-            with open(shell_profile_path, 'r') as shell_profile_file:
+            with open(shell_profile_path) as shell_profile_file:
                 lines = [line.strip() for line in shell_profile_file.readlines()]
             if command not in lines:
                 with open(shell_profile_path, 'a+') as shell_profile_file:
@@ -77,7 +77,7 @@ def setup_bash_profile() -> None:
 
     if os.path.exists(DOT_PROFILE):
         try:
-            with open(BASH_PROFILE, "r") as f:
+            with open(BASH_PROFILE) as f:
                 profile_contents = f.read()
             if profile_contents == OLD_PROFILE_TEXT:
                 os.unlink(BASH_PROFILE)
@@ -214,7 +214,7 @@ def validate(fn=None, text=None, check_indent=True):
         fn = '<in memory file>'
 
     if text is None:
-        with open(fn, 'r') as f:
+        with open(fn) as f:
             text = f.read()
 
     tokens = tokenize(text)
@@ -56,7 +56,7 @@ def get_provisioning_status():
         # their own dependencies and not running provision.
         return True, None
 
-    with open(version_file, 'r') as f:
+    with open(version_file) as f:
        version = f.read().strip()
 
     # Normal path for people that provision--we're all good!
@@ -1,6 +1,4 @@
 #!/usr/bin/env python3
-from __future__ import print_function
-from __future__ import absolute_import
 import os
 import sys
 import argparse
@@ -1,8 +1,3 @@
-# -*- coding: utf-8 -*-
-
-from __future__ import print_function
-from __future__ import absolute_import
-
 from typing import List, TYPE_CHECKING
 
 from zulint.custom_rules import RuleList
@@ -57,7 +52,7 @@ FILES_WITH_LEGACY_SUBJECT = {
 shebang_rules = [
     {'pattern': '^#!',
      'description': "zerver library code shouldn't have a shebang line.",
-     'include_only': set(['zerver/'])},
+     'include_only': {'zerver/'}},
     # /bin/sh and /usr/bin/env are the only two binaries
     # that NixOS provides at a fixed path (outside a
     # buildFHSUserEnv sandbox).
@@ -81,12 +76,12 @@ whitespace_rules = [
     },
     {'pattern': '\t',
      'strip': '\n',
-     'exclude': set(['tools/ci/success-http-headers.txt']),
+     'exclude': {'tools/ci/success-http-headers.txt'},
      'description': 'Fix tab-based whitespace'},
 ]  # type: List[Rule]
 comma_whitespace_rule = [
     {'pattern': ', {2,}[^#/ ]',
-     'exclude': set(['zerver/tests', 'frontend_tests/node_tests', 'corporate/tests']),
+     'exclude': {'zerver/tests', 'frontend_tests/node_tests', 'corporate/tests'},
      'description': "Remove multiple whitespaces after ','",
      'good_lines': ['foo(1, 2, 3)', 'foo = bar  # some inline comment'],
      'bad_lines': ['foo(1,  2, 3)', 'foo(1, 2,  3)']},
@@ -110,8 +105,8 @@ js_rules = RuleList(
     langs=['js'],
     rules=[
         {'pattern': 'subject|SUBJECT',
-         'exclude': set(['static/js/util.js',
-                         'frontend_tests/']),
+         'exclude': {'static/js/util.js',
+                     'frontend_tests/'},
          'exclude_pattern': 'emails',
          'description': 'avoid subject in JS code',
          'good_lines': ['topic_name'],
@@ -155,7 +150,7 @@ js_rules = RuleList(
          'description': 'Write JS else statements on same line as }'},
         {'pattern': r'''[.]text\(["'][a-zA-Z]''',
          'description': 'Strings passed to $().text should be wrapped in i18n.t() for internationalization',
-         'exclude': set(['frontend_tests/node_tests/'])},
+         'exclude': {'frontend_tests/node_tests/'}},
         {'pattern': r'''compose_error\(["']''',
          'description': 'Argument to compose_error should be a literal string enclosed '
                         'by i18n.t()'},
@@ -177,7 +172,7 @@ js_rules = RuleList(
          'bad_lines': ['$(document).ready(function () {foo();}']},
         {'pattern': '[$][.](get|post|patch|delete|ajax)[(]',
          'description': "Use channel module for AJAX calls",
-         'exclude': set([
+         'exclude': {
             # Internal modules can do direct network calls
             'static/js/blueslip.js',
             'static/js/channel.js',
@@ -185,17 +180,17 @@ js_rules = RuleList(
             'static/js/stats/',
             'static/js/portico/',
             'static/js/billing/',
-         ]),
+         },
          'good_lines': ['channel.get(...)'],
          'bad_lines': ['$.get()', '$.post()', '$.ajax()']},
         {'pattern': 'style ?=',
          'description': "Avoid using the `style=` attribute; we prefer styling in CSS files",
-         'exclude': set([
+         'exclude': {
             'frontend_tests/node_tests/copy_and_paste.js',
             'frontend_tests/node_tests/upload.js',
             'static/js/upload.js',
             'static/js/stream_color.js',
-         ]),
+         },
          'good_lines': ['#my-style {color: blue;}'],
          'bad_lines': ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"']},
         *whitespace_rules,
@@ -212,14 +207,14 @@ python_rules = RuleList(
          'good_lines': ['topic_name'],
          'bad_lines': ['subject="foo"', ' MAX_SUBJECT_LEN'],
          'exclude': FILES_WITH_LEGACY_SUBJECT,
-         'include_only': set([
+         'include_only': {
             'zerver/data_import/',
             'zerver/lib/',
             'zerver/tests/',
-            'zerver/views/'])},
+            'zerver/views/'}},
         {'pattern': 'msgid|MSGID',
-         'exclude': set(['tools/check-capitalization',
-                         'tools/i18n/tagmessages']),
+         'exclude': {'tools/check-capitalization',
+                     'tools/i18n/tagmessages'},
          'description': 'Avoid using "msgid" as a variable name; use "message_id" instead.'},
         {'pattern': '^(?!#)@login_required',
          'description': '@login_required is unsupported; use @zulip_login_required',
@@ -227,10 +222,10 @@ python_rules = RuleList(
          'bad_lines': ['@login_required', ' @login_required']},
         {'pattern': '^user_profile[.]save[(][)]',
          'description': 'Always pass update_fields when saving user_profile objects',
-         'exclude_line': set([
+         'exclude_line': {
             ('zerver/lib/actions.py', "user_profile.save()  # Can't use update_fields because of how the foreign key works."),
-         ]),
-         'exclude': set(['zerver/tests', 'zerver/lib/create_user.py']),
+         },
+         'exclude': {'zerver/tests', 'zerver/lib/create_user.py'},
          'good_lines': ['user_profile.save(update_fields=["pointer"])'],
          'bad_lines': ['user_profile.save()']},
         {'pattern': r'^[^"]*"[^"]*"%\(',
@@ -252,7 +247,7 @@ python_rules = RuleList(
          'bad_lines': ['a =b', 'asdf =42']},
         {'pattern': r'":\w[^"]*$',
          'description': 'Missing whitespace after ":"',
-         'exclude': set(['zerver/tests/test_push_notifications.py']),
+         'exclude': {'zerver/tests/test_push_notifications.py'},
          'good_lines': ['"foo": bar', '"some:string:with:colons"'],
          'bad_lines': ['"foo":bar', '"foo":1']},
         {'pattern': r"':\w[^']*$",
@@ -261,7 +256,7 @@ python_rules = RuleList(
          'bad_lines': ["'foo':bar", "'foo':1"]},
         {'pattern': r"^\s+#\w",
          'strip': '\n',
-         'exclude': set(['tools/droplets/create.py']),
+         'exclude': {'tools/droplets/create.py'},
          'description': 'Missing whitespace after "#"',
          'good_lines': ['a = b  # some operation', '1+2  # 3 is the result'],
          'bad_lines': ['  #some operation', '  #not valid!!!']},
@@ -278,9 +273,9 @@ python_rules = RuleList(
          'good_lines': ['# type: (Any, Any)', 'colon:separated:string:containing:type:as:keyword'],
          'bad_lines': ['# type:(Any, Any)']},
         {'pattern': "type: ignore$",
-         'exclude': set(['tools/tests',
+         'exclude': {'tools/tests',
                      'zerver/lib/test_runner.py',
-                         'zerver/tests']),
+                     'zerver/tests'},
          'description': '"type: ignore" should always end with "# type: ignore # explanation for why"',
          'good_lines': ['foo = bar # type: ignore # explanation'],
          'bad_lines': ['foo = bar # type: ignore']},
@@ -318,17 +313,17 @@ python_rules = RuleList(
          'good_lines': ['"foo %s bar" % ("baz",)"'],
          'bad_lines': ['"foo %s bar" % ("baz")']},
         {'pattern': 'sudo',
-         'include_only': set(['scripts/']),
-         'exclude': set(['scripts/lib/setup_venv.py']),
-         'exclude_line': set([
+         'include_only': {'scripts/'},
+         'exclude': {'scripts/lib/setup_venv.py'},
+         'exclude_line': {
             ('scripts/lib/zulip_tools.py', 'sudo_args = kwargs.pop(\'sudo_args\', [])'),
             ('scripts/lib/zulip_tools.py', 'args = [\'sudo\'] + sudo_args + [\'--\'] + args'),
-         ]),
+         },
          'description': 'Most scripts are intended to run on systems without sudo.',
          'good_lines': ['subprocess.check_call(["ls"])'],
          'bad_lines': ['subprocess.check_call(["sudo", "ls"])']},
         {'pattern': 'django.utils.translation',
-         'include_only': set(['test/', 'zerver/views/development/']),
+         'include_only': {'test/', 'zerver/views/development/'},
          'description': 'Test strings should not be tagged for translation',
          'good_lines': [''],
          'bad_lines': ['django.utils.translation']},
@@ -341,25 +336,23 @@ python_rules = RuleList(
          'good_lines': ['return json_success()'],
          'bad_lines': ['return json_success({})']},
         {'pattern': r'\Wjson_error\(_\(?\w+\)',
-         'exclude': set(['zerver/tests', 'zerver/views/development/']),
+         'exclude': {'zerver/tests', 'zerver/views/development/'},
          'description': 'Argument to json_error should be a literal string enclosed by _()',
          'good_lines': ['return json_error(_("string"))'],
          'bad_lines': ['return json_error(_variable)', 'return json_error(_(variable))']},
         {'pattern': r'''\Wjson_error\(['"].+[),]$''',
-         'exclude': set(['zerver/tests']),
+         'exclude': {'zerver/tests'},
          'description': 'Argument to json_error should a literal string enclosed by _()'},
         # To avoid JsonableError(_variable) and JsonableError(_(variable))
         {'pattern': r'\WJsonableError\(_\(?\w.+\)',
-         'exclude': set(['zerver/tests', 'zerver/views/development/']),
+         'exclude': {'zerver/tests', 'zerver/views/development/'},
          'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
         {'pattern': r'''\WJsonableError\(["'].+\)''',
-         'exclude': set(['zerver/tests', 'zerver/views/development/']),
+         'exclude': {'zerver/tests', 'zerver/views/development/'},
          'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
         {'pattern': r"""\b_\((?:\s|{}|{})*[^\s'")]""".format(PYSQ, PYDQ),
          'description': 'Called _() on a computed string',
-         'exclude_line': set([
-             ('zerver/lib/i18n.py', 'result = _(string)'),
-         ]),
+         'exclude_line': {('zerver/lib/i18n.py', 'result = _(string)')},
          'good_lines': ["return json_error(_('No presence data for %s') % (target.email,))"],
          'bad_lines': ["return json_error(_('No presence data for %s' % (target.email,)))"]},
         {'pattern': r'''([a-zA-Z0-9_]+)=REQ\(['"]\1['"]''',
@@ -371,35 +364,35 @@ python_rules = RuleList(
          '''},
         # Directly fetching Message objects in e.g. views code is often a security bug.
         {'pattern': '[^r]Message.objects.get',
-         'exclude': set(["zerver/tests",
+         'exclude': {"zerver/tests",
                      "zerver/lib/onboarding.py",
                      "zilencer/management/commands/add_mock_conversation.py",
                      "zerver/worker/queue_processors.py",
                      "zerver/management/commands/export.py",
-                     "zerver/lib/export.py"]),
+                     "zerver/lib/export.py"},
          'description': 'Please use access_message() to fetch Message objects',
          },
         {'pattern': 'Stream.objects.get',
-         'include_only': set(["zerver/views/"]),
+         'include_only': {"zerver/views/"},
          'description': 'Please use access_stream_by_*() to fetch Stream objects',
          },
         {'pattern': 'get_stream[(]',
-         'include_only': set(["zerver/views/", "zerver/lib/actions.py"]),
-         'exclude_line': set([
+         'include_only': {"zerver/views/", "zerver/lib/actions.py"},
+         'exclude_line': {
             # This one in check_message is kinda terrible, since it's
            # how most instances are written, but better to exclude something than nothing
             ('zerver/lib/actions.py', 'stream = get_stream(stream_name, realm)'),
             ('zerver/lib/actions.py', 'return get_stream("signups", realm)'),
-         ]),
+         },
          'description': 'Please use access_stream_by_*() to fetch Stream objects',
          },
         {'pattern': 'Stream.objects.filter',
-         'include_only': set(["zerver/views/"]),
+         'include_only': {"zerver/views/"},
          'description': 'Please use access_stream_by_*() to fetch Stream objects',
          },
         {'pattern': '^from (zerver|analytics|confirmation)',
-         'include_only': set(["/migrations/"]),
-         'exclude': set([
+         'include_only': {"/migrations/"},
+         'exclude': {
             'zerver/migrations/0032_verify_all_medium_avatar_images.py',
             'zerver/migrations/0060_move_avatars_to_be_uid_based.py',
             'zerver/migrations/0104_fix_unreads.py',
@@ -407,11 +400,11 @@ python_rules = RuleList(
             'zerver/migrations/0209_user_profile_no_empty_password.py',
             'zerver/migrations/0260_missed_message_addresses_from_redis_to_db.py',
             'pgroonga/migrations/0002_html_escape_subject.py',
-         ]),
+         },
          'description': "Don't import models or other code in migrations; see docs/subsystems/schema-migrations.md",
          },
         {'pattern': 'datetime[.](now|utcnow)',
-         'include_only': set(["zerver/", "analytics/"]),
+         'include_only': {"zerver/", "analytics/"},
          'description': "Don't use datetime in backend code.\n"
                         "See https://zulip.readthedocs.io/en/latest/contributing/code-style.html#naive-datetime-objects",
          },
@@ -431,7 +424,7 @@ python_rules = RuleList(
          'good_lines': ['if my_django_model.id == 42', 'self.user_profile._meta.pk'],
          'bad_lines': ['if my_django_model.pk == 42']},
         {'pattern': r'^[ ]*# type: \(',
-         'exclude': set([
+         'exclude': {
             # These directories, especially scripts/ and puppet/,
             # have tools that need to run before a Zulip environment
             # is provisioned; in some of those, the `typing` module
@@ -446,7 +439,7 @@ python_rules = RuleList(
             'zerver/views/streams.py',
             # thumbor is (currently) python2 only
             'zthumbor/',
-         ]),
+         },
          'description': 'Comment-style function type annotation. Use Python3 style annotations instead.',
          },
         {'pattern': r' = models[.].*null=True.*\)  # type: (?!Optional)',
@@ -471,7 +464,7 @@ python_rules = RuleList(
                         'stream = models.ForeignKey(Stream, on_delete=CASCADE)  # type: Optional[Stream]'],
          },
         {'pattern': r'[\s([]Text([^\s\w]|$)',
-         'exclude': set([
+         'exclude': {
             # We are likely to want to keep these dirs Python 2+3 compatible,
             # since the plan includes extracting them to a separate project eventually.
             'tools/lib',
@@ -479,26 +472,26 @@ python_rules = RuleList(
             'zerver/migrations/',
             # thumbor is (currently) python2 only
             'zthumbor/',
-         ]),
+         },
          'description': "Now that we're a Python 3 only codebase, we don't need to use typing.Text. Please use str instead.",
          },
         {'pattern': 'exit[(]1[)]',
-         'include_only': set(["/management/commands/"]),
+         'include_only': {"/management/commands/"},
          'description': 'Raise CommandError to exit with failure in management commands',
          },
         {'pattern': '.is_realm_admin =',
          'description': 'Use do_change_is_admin function rather than setting UserProfile\'s is_realm_admin attribute directly.',
-         'exclude': set([
+         'exclude': {
             'zerver/migrations/0248_userprofile_role_start.py',
             'zerver/tests/test_users.py',
-         ]),
+         },
          },
         {'pattern': '.is_guest =',
          'description': 'Use do_change_is_guest function rather than setting UserProfile\'s is_guest attribute directly.',
-         'exclude': set([
+         'exclude': {
             'zerver/migrations/0248_userprofile_role_start.py',
             'zerver/tests/test_users.py',
-         ]),
+         },
          },
         *whitespace_rules,
         *comma_whitespace_rule,
@@ -515,11 +508,11 @@ bash_rules = RuleList(
                         ' to set -x|set -e'},
         {'pattern': 'sudo',
          'description': 'Most scripts are intended to work on systems without sudo',
-         'include_only': set(['scripts/']),
-         'exclude': set([
+         'include_only': {'scripts/'},
+         'exclude': {
             'scripts/lib/install',
             'scripts/setup/configure-rabbitmq'
-         ]), },
+         }, },
         *whitespace_rules[0:1],
     ],
     shebang_rules=shebang_rules,
@@ -576,7 +569,7 @@ css_rules = RuleList(
 
 prose_style_rules = [
     {'pattern': r'[^\/\#\-"]([jJ]avascript)',  # exclude usage in hrefs/divs
-     'exclude': set(["docs/documentation/api.md"]),
+     'exclude': {"docs/documentation/api.md"},
     'description': "javascript should be spelled JavaScript"},
     {'pattern': r'''[^\/\-\."'\_\=\>]([gG]ithub)[^\.\-\_"\<]''',  # exclude usage in hrefs/divs
     'description': "github should be spelled GitHub"},
@@ -593,7 +586,7 @@ prose_style_rules = [
 ]  # type: List[Rule]
 html_rules = whitespace_rules + prose_style_rules + [
     {'pattern': 'subject|SUBJECT',
-     'exclude': set(['templates/zerver/email.html']),
+     'exclude': {'templates/zerver/email.html'},
      'exclude_pattern': 'email subject',
      'description': 'avoid subject in templates',
      'good_lines': ['topic_name'],
@@ -602,7 +595,7 @@ html_rules = whitespace_rules + prose_style_rules + [
      'description': "`placeholder` value should be translatable.",
      'exclude_line': {('templates/zerver/register.html', 'placeholder="acme"'),
                       ('templates/zerver/register.html', 'placeholder="Acme or Ακμή"')},
-     'exclude': set(["templates/analytics/support.html"]),
+     'exclude': {"templates/analytics/support.html"},
      'good_lines': ['<input class="stream-list-filter" type="text" placeholder="{{ _(\'Search streams\') }}" />'],
      'bad_lines': ['<input placeholder="foo">']},
     {'pattern': "placeholder='[^{]",
@@ -619,8 +612,8 @@ html_rules = whitespace_rules + prose_style_rules + [
      'bad_lines': ['<button aria-label="foo"></button>']},
     {'pattern': 'script src="http',
      'description': "Don't directly load dependencies from CDNs. See docs/subsystems/html-css.md",
-     'exclude': set(["templates/corporate/billing.html", "templates/zerver/hello.html",
-                     "templates/corporate/upgrade.html"]),
+     'exclude': {"templates/corporate/billing.html", "templates/zerver/hello.html",
+                 "templates/corporate/upgrade.html"},
      'good_lines': ["{{ render_entrypoint('landing-page') }}"],
      'bad_lines': ['<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>']},
     {'pattern': "title='[^{]",
@@ -628,17 +621,17 @@ html_rules = whitespace_rules + prose_style_rules + [
      'good_lines': ['<link rel="author" title="{{ _(\'About these documents\') }}" />'],
      'bad_lines': ["<p title='foo'></p>"]},
     {'pattern': r'title="[^{\:]',
-     'exclude_line': set([
+     'exclude_line': {
          ('templates/zerver/app/markdown_help.html',
           '<td class="rendered_markdown"><img alt=":heart:" class="emoji" src="/static/generated/emoji/images/emoji/heart.png" title=":heart:" /></td>')
-     ]),
-     'exclude': set(["templates/zerver/emails", "templates/analytics/realm_details.html", "templates/analytics/support.html"]),
+     },
+     'exclude': {"templates/zerver/emails", "templates/analytics/realm_details.html", "templates/analytics/support.html"},
      'description': "`title` value should be translatable."},
     {'pattern': r'''\Walt=["'][^{"']''',
      'description': "alt argument should be enclosed by _() or it should be an empty string.",
-     'exclude': set(['static/templates/settings/display_settings.hbs',
+     'exclude': {'static/templates/settings/display_settings.hbs',
                  'templates/zerver/app/keyboard_shortcuts.html',
-                     'templates/zerver/app/markdown_help.html']),
+                 'templates/zerver/app/markdown_help.html'},
      'good_lines': ['<img src="{{source_url}}" alt="{{ _(name) }}" />', '<img alg="" />'],
      'bad_lines': ['<img alt="Foo Image" />']},
     {'pattern': r'''\Walt=["']{{ ?["']''',
@@ -649,13 +642,13 @@ html_rules = whitespace_rules + prose_style_rules + [
      'description': "Don't use inline event handlers (onclick=, etc. attributes) in HTML. Instead,"
                     "attach a jQuery event handler ($('#foo').on('click', function () {...})) when "
                     "the DOM is ready (inside a $(function () {...}) block).",
-     'exclude': set(['templates/zerver/dev_login.html', 'templates/corporate/upgrade.html']),
+     'exclude': {'templates/zerver/dev_login.html', 'templates/corporate/upgrade.html'},
      'good_lines': ["($('#foo').on('click', function () {}"],
      'bad_lines': ["<button id='foo' onclick='myFunction()'>Foo</button>", "<input onchange='myFunction()'>"]},
     {'pattern': 'style ?=',
      'description': "Avoid using the `style=` attribute; we prefer styling in CSS files",
      'exclude_pattern': r'.*style ?=["' + "'" + '](display: ?none|background: {{|color: {{|background-color: {{).*',
-     'exclude': set([
+     'exclude': {
          # KaTeX output uses style attribute
          'templates/zerver/app/markdown_help.html',
          # 5xx page doesn't have external CSS
@@ -714,7 +707,7 @@ html_rules = whitespace_rules + prose_style_rules + [
          'templates/analytics/realm_summary_table.html',
          'templates/corporate/zephyr.html',
          'templates/corporate/zephyr-mirror.html',
-     ]),
+     },
      'good_lines': ['#my-style {color: blue;}', 'style="display: none"', "style='display: none"],
      'bad_lines': ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"']},
 ]  # type: List[Rule]
@@ -764,10 +757,10 @@ json_rules = RuleList(
         trailing_whitespace_rule,
         {'pattern': '\t',
          'strip': '\n',
-         'exclude': set(['zerver/webhooks/']),
+         'exclude': {'zerver/webhooks/'},
          'description': 'Fix tab-based whitespace'},
         {'pattern': r'":["\[\{]',
-         'exclude': set(['zerver/webhooks/', 'zerver/tests/fixtures/']),
+         'exclude': {'zerver/webhooks/', 'zerver/tests/fixtures/'},
          'description': 'Require space after : in JSON'},
     ]
 )
@@ -801,15 +794,15 @@ markdown_rules = RuleList(
          'description': 'Linkified markdown URLs should use cleaner <http://example.com> syntax.'},
         {'pattern': 'https://zulip.readthedocs.io/en/latest/[a-zA-Z0-9]',
          'exclude': {'docs/overview/contributing.md', 'docs/overview/readme.md', 'docs/README.md'},
-         'include_only': set(['docs/']),
+         'include_only': {'docs/'},
          'description': "Use relative links (../foo/bar.html) to other documents in docs/",
          },
         {'pattern': "su zulip -c [^']",
-         'include_only': set(['docs/']),
+         'include_only': {'docs/'},
          'description': "Always quote arguments using `su zulip -c '` to avoid confusion about how su works.",
          },
         {'pattern': r'\][(][^#h]',
-         'include_only': set(['README.md', 'CONTRIBUTING.md']),
+         'include_only': {'README.md', 'CONTRIBUTING.md'},
          'description': "Use absolute links from docs served by GitHub",
          },
     ],
@ -823,10 +816,10 @@ help_markdown_rules = RuleList(
|
|||
rules=markdown_rules.rules + [
|
||||
{'pattern': '[a-z][.][A-Z]',
|
||||
'description': "Likely missing space after end of sentence",
|
||||
'include_only': set(['templates/zerver/help/']),
|
||||
'include_only': {'templates/zerver/help/'},
|
||||
},
|
||||
{'pattern': r'\b[rR]ealm[s]?\b',
|
||||
'include_only': set(['templates/zerver/help/']),
|
||||
'include_only': {'templates/zerver/help/'},
|
||||
'good_lines': ['Organization', 'deactivate_realm', 'realm_filter'],
|
||||
'bad_lines': ['Users are in a realm', 'Realm is the best model'],
|
||||
'description': "Realms are referred to as Organizations in user-facing docs."},
|
||||
|
|
|
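The bulk of this file's hunks are pyupgrade rewriting `set([...])` calls into set literals. A minimal sketch of the equivalence, with illustrative values rather than the actual lint rules:

    # Python 2-compatible spelling: build a list, then hand it to set()
    excluded = set(['a.html', 'b.html'])
    # Python 3 set literal: the same set, without the throwaway list
    excluded = {'a.html', 'b.html'}
    assert set(['a.html', 'b.html']) == {'a.html', 'b.html'}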
@ -1,6 +1,3 @@
from __future__ import print_function
from __future__ import absolute_import

from zulint.linters import run_pycodestyle

from typing import List

@ -1,6 +1,3 @@
from __future__ import print_function
from __future__ import absolute_import

import argparse

from typing import List
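Both files above simply drop their `from __future__` imports; on Python 3 the behavior they enable is unconditionally on, so the lines are no-ops. A tiny sketch:

    # Redundant on Python 3 -- print is already a function and
    # imports are already absolute by default:
    from __future__ import print_function
    from __future__ import absolute_import
    print("hello")  # the builtin function either way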
@ -7,7 +7,7 @@ def clean_html(filenames):
# type: (List[str]) -> None
for fn in filenames:
print('Prettifying: %s' % (fn,))
with open(fn, 'r') as f:
with open(fn) as f:
html = f.read()
phtml = pretty_print_html(html)
with open(fn, 'w') as f:
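The recurring open() change throughout this commit relies on 'r' being open()'s default mode, so the argument adds nothing. A sketch with a hypothetical file name:

    # Equivalent: mode defaults to 'r' (read, text mode)
    with open('example.txt', 'r') as f:
        before = f.read()
    with open('example.txt') as f:
        after = f.read()
    assert before == after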
@ -8,7 +8,7 @@ os.chdir(os.path.join(os.path.dirname(__file__), '..'))
pid_file_path = os.path.join(os.path.join(os.getcwd(), 'var/run/run_dev.pid'))

try:
with open(pid_file_path, 'r') as pid_file:
with open(pid_file_path) as pid_file:
try:
pid = int(pid_file.read())
except ValueError:

@ -143,9 +143,9 @@ FAILED_TEST_PATH = 'var/last_test_failure.json'

def get_failed_tests() -> List[str]:
try:
with open(FAILED_TEST_PATH, 'r') as f:
with open(FAILED_TEST_PATH) as f:
return ujson.load(f)
except IOError:
except OSError:
print("var/last_test_failure.json doesn't exist; running all tests.")
return []

@ -244,9 +244,9 @@ def check_line_coverage(fn, line_coverage, line_mapping, log=True):
def read_coverage() -> Any:
coverage_json = None
try:
with open(NODE_COVERAGE_PATH, 'r') as f:
with open(NODE_COVERAGE_PATH) as f:
coverage_json = ujson.load(f)
except IOError:
except OSError:
print(NODE_COVERAGE_PATH + " doesn't exist. Cannot enforce fully covered files.")
raise
return coverage_json
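The except clauses change because PEP 3151 made IOError an alias of OSError in Python 3.3+, so catching OSError covers both spellings. Sketch:

    assert IOError is OSError
    try:
        open('/nonexistent/path')
    except OSError:  # also catches anything raised as IOError
        pass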
@ -28,7 +28,7 @@ for zuliprc_path in zuliprc_paths_list:
zuliprc = configparser.ConfigParser()
result = ''
try:
with open(zuliprc_path, 'r') as f:
with open(zuliprc_path) as f:
zuliprc.read_file(f, zuliprc_path)
api_details = zuliprc['api']
email = api_details['email']
@ -66,7 +66,7 @@ for zuliprc_path in zuliprc_paths_list:
zuliprc.write(w)
result = 'SUCCESS'
reason = 'API field updated for user %s' % (email,)
except (IOError, OSError):
except OSError:
result = 'FAILURE'
reason = 'Writing to file unsuccessful'
else:

@ -1,5 +1,4 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright © 2014 Dropbox, Inc.
#
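Dropping `# -*- coding: utf-8 -*-`, here and in the migrations earlier in this diff, is safe because PEP 3120 made UTF-8 the default source encoding in Python 3; the cookie only mattered on Python 2. Sketch:

    # No encoding declaration needed for non-ASCII text in Python 3 source:
    greeting = "naïve café"
    assert len(greeting) == 10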
@ -238,7 +238,7 @@ def do_convert_data(gitter_data_file: str, output_dir: str, threads: int=6) -> N
raise Exception("Output directory should be empty!")

# Read data from the gitter file
with open(gitter_data_file, "r") as fp:
with open(gitter_data_file) as fp:
gitter_data = ujson.load(fp)

realm, avatar_list, user_map = gitter_workspace_to_realm(

@ -84,7 +84,7 @@ def untar_input_file(tar_file: str) -> str:
def read_user_data(data_dir: str) -> List[ZerverFieldsT]:
fn = 'users.json'
data_file = os.path.join(data_dir, fn)
with open(data_file, "r") as fp:
with open(data_file) as fp:
return ujson.load(fp)

def convert_user_data(user_handler: UserHandler,
@ -257,7 +257,7 @@ def convert_room_data(raw_data: List[ZerverFieldsT],
users = set()
if api_token is not None:
hc = hypchat.HypChat(api_token)
room_data = hc.fromurl('{0}/v2/room/{1}/member'.format(hc.endpoint, in_dict['id']))
room_data = hc.fromurl('{}/v2/room/{}/member'.format(hc.endpoint, in_dict['id']))

for item in room_data['items']:
hipchat_user_id = item['id']
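When .format() fields are numbered 0, 1, 2, ... in source order, the indices are redundant and pyupgrade strips them. Illustrative values, not the real endpoint:

    endpoint = 'https://api.example.com'
    room_id = 42
    explicit = '{0}/v2/room/{1}/member'.format(endpoint, room_id)
    implicit = '{}/v2/room/{}/member'.format(endpoint, room_id)
    assert explicit == implicit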
@ -681,7 +681,7 @@ def mattermost_data_file_to_dict(mattermost_data_file: str) -> Dict[str, Any]:
mattermost_data["emoji"] = []
mattermost_data["direct_channel"] = []

with open(mattermost_data_file, "r") as fp:
with open(mattermost_data_file) as fp:
for line in fp:
row = ujson.loads(line.rstrip("\n"))
data_type = row["type"]
@ -205,7 +205,7 @@ def users_to_zerver_userprofile(slack_data_dir: str, users: List[ZerverFieldsT],
if not user.get('is_primary_owner', False):
user_id_count += 1

logging.info(u"{} -> {}".format(user['name'], userprofile_dict['email']))
logging.info("{} -> {}".format(user['name'], userprofile_dict['email']))

process_customprofilefields(zerver_customprofilefield, zerver_customprofilefield_values)
logging.info('######### IMPORTING USERS FINISHED #########\n')
@ -397,7 +397,7 @@ def channels_to_zerver_stream(slack_data_dir: str, realm_id: int,

stream_id_count += 1
recipient_id_count += 1
logging.info(u"{} -> created".format(channel['name']))
logging.info("{} -> created".format(channel['name']))

# TODO map Slack's pins to Zulip's stars
# There is the security model that Slack's pins are known to the team owner
@ -443,7 +443,7 @@ def channels_to_zerver_stream(slack_data_dir: str, realm_id: int,

huddle_id_count += 1
recipient_id_count += 1
logging.info(u"{} -> created".format(mpim['name']))
logging.info("{} -> created".format(mpim['name']))

try:
mpims = get_data_file(slack_data_dir + '/mpims.json')
@ -649,8 +649,7 @@ def get_messages_iterator(slack_data_dir: str, added_channels: Dict[str, Any],

# we sort the messages according to the timestamp to show messages with
# the proper date order
for message in sorted(messages_for_one_day, key=lambda m: m['ts']):
yield message
yield from sorted(messages_for_one_day, key=lambda m: m['ts'])

def channel_message_to_zerver_message(realm_id: int,
users: List[ZerverFieldsT],
@ -1095,7 +1094,7 @@ def do_convert_data(slack_zip_file: str, output_dir: str, token: str, threads: i
logging.info("Zulip data dump created at %s" % (output_dir,))

def get_data_file(path: str) -> Any:
with open(path, "r") as fp:
with open(path) as fp:
data = ujson.load(fp)
return data
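The get_messages_iterator hunk uses `yield from` (PEP 380), which delegates to an iterable and replaces the two-line yield loop exactly. A minimal sketch:

    def gen_loop(items):
        for item in sorted(items):
            yield item

    def gen_delegate(items):
        yield from sorted(items)  # same sequence, one line

    assert list(gen_loop([3, 1, 2])) == list(gen_delegate([3, 1, 2]))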
@ -37,16 +37,16 @@ from typing import Any, List, Optional, Dict, Tuple
from two_factor.forms import AuthenticationTokenForm as TwoFactorAuthenticationTokenForm
from two_factor.utils import totp_digits

MIT_VALIDATION_ERROR = u'That user does not exist at MIT or is a ' + \
u'<a href="https://ist.mit.edu/email-lists">mailing list</a>. ' + \
u'If you want to sign up an alias for Zulip, ' + \
u'<a href="mailto:support@zulipchat.com">contact us</a>.'
MIT_VALIDATION_ERROR = 'That user does not exist at MIT or is a ' + \
'<a href="https://ist.mit.edu/email-lists">mailing list</a>. ' + \
'If you want to sign up an alias for Zulip, ' + \
'<a href="mailto:support@zulipchat.com">contact us</a>.'
WRONG_SUBDOMAIN_ERROR = "Your Zulip account is not a member of the " + \
"organization associated with this subdomain. " + \
"Please contact your organization administrator with any questions."
DEACTIVATED_ACCOUNT_ERROR = u"Your account is no longer active. " + \
u"Please contact your organization administrator to reactivate it."
PASSWORD_TOO_WEAK_ERROR = u"The password is too weak."
DEACTIVATED_ACCOUNT_ERROR = "Your account is no longer active. " + \
"Please contact your organization administrator to reactivate it."
PASSWORD_TOO_WEAK_ERROR = "The password is too weak."
AUTHENTICATION_RATE_LIMITED_ERROR = "You're making too many attempts to sign in. " + \
"Try again in %s seconds or contact your organization administrator " + \
"for help."
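The u'' prefixes removed in this hunk are purely cosmetic on Python 3, where every string literal is already text (str):

    assert u"The password is too weak." == "The password is too weak."
    assert type(u"x") is str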
@ -1158,11 +1158,11 @@ def get_recipient_info(recipient: Recipient,
# direct recipient or were mentioned; for now, we're just making
# sure we have the data we need for that without extra database
# queries.
default_bot_user_ids = set([
default_bot_user_ids = {
row['id']
for row in rows
if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT
])
}

service_bot_tuples = [
(row['id'], row['bot_type'])
@ -1946,7 +1946,7 @@ def get_recipient_from_user_profiles(recipient_profiles: Sequence[UserProfile],
# Otherwise, we need a huddle. Make sure the sender is included in huddle messages
recipient_profiles_map[sender.id] = sender

user_ids = set([user_id for user_id in recipient_profiles_map]) # type: Set[int]
user_ids = {user_id for user_id in recipient_profiles_map} # type: Set[int]
return get_huddle_recipient(user_ids)

def validate_recipient_user_profiles(user_profiles: Sequence[UserProfile],
@ -2680,7 +2680,7 @@ def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]],
for stream_id in stream_ids
])

result = dict((stream["id"], []) for stream in stream_dicts) # type: Dict[int, List[int]]
result = {stream["id"]: [] for stream in stream_dicts} # type: Dict[int, List[int]]
if not recipient_ids:
return result

@ -2857,7 +2857,7 @@ def bulk_add_subscriptions(streams: Iterable[Stream],
acting_user: Optional[UserProfile]=None) -> SubT:
users = list(users)

recipients_map = dict((stream.id, stream.recipient_id) for stream in streams) # type: Dict[int, int]
recipients_map = {stream.id: stream.recipient_id for stream in streams} # type: Dict[int, int]
recipient_ids = [recipient_id for recipient_id in recipients_map.values()] # type: List[int]

stream_map = {} # type: Dict[int, Stream]
@ -4877,7 +4877,7 @@ def gather_subscriptions_helper(user_profile: UserProfile,
# Deactivated streams aren't in stream_hash.
streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts
if sub["stream_id"] in stream_hash]
streams_subscribed_map = dict((sub["stream_id"], sub["active"]) for sub in sub_dicts)
streams_subscribed_map = {sub["stream_id"]: sub["active"] for sub in sub_dicts}

# Add never subscribed streams to streams_subscribed_map
streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})
@ -5577,7 +5577,7 @@ def check_attachment_reference_change(message: Message) -> bool:
# For a unsaved message edit (message.* has been updated, but not
# saved to the database), adjusts Attachment data to correspond to
# the new content.
prev_attachments = set([a.path_id for a in message.attachment_set.all()])
prev_attachments = {a.path_id for a in message.attachment_set.all()}
new_attachments = set(message.potential_attachment_path_ids)

if new_attachments == prev_attachments:
@ -5652,7 +5652,7 @@ def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileFiel
notify_realm_custom_profile_fields(realm, 'update')

def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
order_mapping = dict((_[1], _[0]) for _ in enumerate(order))
order_mapping = {_[1]: _[0] for _ in enumerate(order)}
fields = CustomProfileField.objects.filter(realm=realm)
for field in fields:
if field.id not in order_mapping:
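Several hunks above turn `dict(...)` over a generator of pairs into a dict comprehension: the same mapping, without allocating a tuple per entry. A sketch mirroring the try_reorder hunk, with illustrative field IDs:

    order = [30, 10, 20]
    # Generator of (key, value) tuples fed to dict():
    order_mapping = dict((_[1], _[0]) for _ in enumerate(order))
    # Equivalent dict comprehension:
    order_mapping = {field_id: index for index, field_id in enumerate(order)}
    assert order_mapping == {30: 0, 10: 1, 20: 2}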
@ -10,8 +10,8 @@ from typing import Dict, Iterable, List
def alert_words_in_realm(realm: Realm) -> Dict[int, List[str]]:
users_query = UserProfile.objects.filter(realm=realm, is_active=True)
alert_word_data = users_query.filter(~Q(alert_words=ujson.dumps([]))).values('id', 'alert_words')
all_user_words = dict((elt['id'], ujson.loads(elt['alert_words'])) for elt in alert_word_data)
user_ids_with_words = dict((user_id, w) for (user_id, w) in all_user_words.items() if len(w))
all_user_words = {elt['id']: ujson.loads(elt['alert_words']) for elt in alert_word_data}
user_ids_with_words = {user_id: w for (user_id, w) in all_user_words.items() if len(w)}
return user_ids_with_words

@cache_with_key(realm_alert_words_automaton_cache_key, timeout=3600*24)
@ -25,7 +25,7 @@ def get_alert_word_automaton(realm: Realm) -> ahocorasick.Automaton:
(key, user_ids_for_alert_word) = alert_word_automaton.get(alert_word_lower)
user_ids_for_alert_word.add(user_id)
else:
alert_word_automaton.add_word(alert_word_lower, (alert_word_lower, set([user_id])))
alert_word_automaton.add_word(alert_word_lower, (alert_word_lower, {user_id}))
alert_word_automaton.make_automaton()
# If the kind is not AHOCORASICK after calling make_automaton, it means there is no key present
# and hence we cannot call items on the automaton yet. To avoid it we return None for such cases
@ -263,7 +263,7 @@ def list_of_tlds() -> List[str]:

# tlds-alpha-by-domain.txt comes from https://data.iana.org/TLD/tlds-alpha-by-domain.txt
tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
tlds = [tld.lower().strip() for tld in open(tlds_file, 'r')
tlds = [tld.lower().strip() for tld in open(tlds_file)
if tld not in blacklist and not tld[0].startswith('#')]
tlds.sort(key=len, reverse=True)
return tlds
@ -563,10 +563,10 @@ class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
# We strip leading '/' from relative URLs here to ensure
# consistency in what gets passed to /thumbnail
url = url.lstrip('/')
img.set("src", "/thumbnail?url={0}&size=thumbnail".format(
img.set("src", "/thumbnail?url={}&size=thumbnail".format(
urllib.parse.quote(url, safe='')
))
img.set('data-src-fullsize', "/thumbnail?url={0}&size=full".format(
img.set('data-src-fullsize', "/thumbnail?url={}&size=full".format(
urllib.parse.quote(url, safe='')
))
else:
@ -1187,7 +1187,7 @@ class Avatar(markdown.inlinepatterns.Pattern):
profile_id = user_dict['id']

img.set('class', 'message_body_gravatar')
img.set('src', '/avatar/{0}?s=30'.format(profile_id or email))
img.set('src', '/avatar/{}?s=30'.format(profile_id or email))
img.set('title', email)
img.set('alt', email)
return img
@ -1713,9 +1713,9 @@ def possible_linked_stream_names(content: str) -> Set[str]:

class AlertWordsNotificationProcessor(markdown.preprocessors.Preprocessor):

allowed_before_punctuation = set([' ', '\n', '(', '"', '.', ',', '\'', ';', '[', '*', '`', '>'])
allowed_after_punctuation = set([' ', '\n', ')', '",', '?', ':', '.', ',', '\'', ';', ']', '!',
'*', '`'])
allowed_before_punctuation = {' ', '\n', '(', '"', '.', ',', '\'', ';', '[', '*', '`', '>'}
allowed_after_punctuation = {' ', '\n', ')', '",', '?', ':', '.', ',', '\'', ';', ']', '!',
'*', '`'}

def check_valid_start_position(self, content: str, index: int) -> bool:
if index <= 0 or content[index] in self.allowed_before_punctuation:
@ -30,7 +30,7 @@ class MarkdownArgumentsTableGenerator(Extension):

class APIArgumentsTablePreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super(APIArgumentsTablePreprocessor, self).__init__(md)
super().__init__(md)
self.base_path = config['base_path']

def run(self, lines: List[str]) -> List[str]:
@ -66,7 +66,7 @@ class APIArgumentsTablePreprocessor(Preprocessor):
if e.args != ('parameters',):
raise e
else:
with open(filename, 'r') as fp:
with open(filename) as fp:
json_obj = json.load(fp)
arguments = json_obj[doc_name]

@ -278,7 +278,7 @@ class APICodeExamplesGenerator(Extension):

class APICodeExamplesPreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super(APICodeExamplesPreprocessor, self).__init__(md)
super().__init__(md)
self.api_url = config['api_url']

def run(self, lines: List[str]) -> List[str]:
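The `super(ClassName, self)` → `super()` rewrites in these preprocessors rely on Python 3's zero-argument super(), which infers the class and instance inside a method. A generic sketch, not the actual markdown extension:

    class Base:
        def __init__(self, value: int) -> None:
            self.value = value

    class Child(Base):
        def __init__(self, value: int) -> None:
            # Python 2 spelling: super(Child, self).__init__(value)
            super().__init__(value)  # same MRO lookup, less repetition

    assert Child(3).value == 3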
@ -1,4 +1,3 @@
from __future__ import print_function
import re
import os
from typing import Any, Dict, List
@ -42,7 +41,7 @@ class IncludeCustomPreprocessor(IncludePreprocessor):
os.path.join(self.base_path, filename)
)
try:
with open(filename, 'r', encoding=self.encoding) as r:
with open(filename, encoding=self.encoding) as r:
text = r.readlines()
except Exception as e:
print('Warning: could not find file {}. Error: {}'.format(filename, e))

@ -15,7 +15,7 @@ class NestedCodeBlocksRenderer(Extension):

class NestedCodeBlocksRendererTreeProcessor(markdown.treeprocessors.Treeprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super(NestedCodeBlocksRendererTreeProcessor, self).__init__(md)
super().__init__(md)

def run(self, root: Element) -> None:
code_tags = walk_tree_with_family(root, self.get_code_tags)

@ -84,7 +84,7 @@ class TabbedSectionsGenerator(Extension):

class TabbedSectionsPreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super(TabbedSectionsPreprocessor, self).__init__(md)
super().__init__(md)

def run(self, lines: List[str]) -> List[str]:
tab_section = self.parse_tabs(lines)
@ -1,5 +1,3 @@
# -*- coding: utf-8 -*-

from typing import Any, Dict, Optional
import ujson
@ -88,7 +88,7 @@ def bulk_set_users_or_streams_recipient_fields(model: Model,
object_ids = [obj.id for obj in objects]
recipients = Recipient.objects.filter(type=recipient_type, type_id__in=object_ids)

objects_dict = dict((obj.id, obj) for obj in objects)
objects_dict = {obj.id: obj for obj in objects}

for recipient in recipients:
assert recipient.type == recipient_type
@ -87,7 +87,7 @@ def get_or_create_key_prefix() -> str:
# The file already exists
tries = 1
while tries < 10:
with open(filename, 'r') as f:
with open(filename) as f:
prefix = f.readline()[:-1]
if len(prefix) == 33:
break
@ -244,7 +244,7 @@ def cache_get_many(keys: List[str], cache_name: Optional[str]=None) -> Dict[str,
remote_cache_stats_start()
ret = get_cache_backend(cache_name).get_many(keys)
remote_cache_stats_finish()
return dict([(key[len(KEY_PREFIX):], value) for key, value in ret.items()])
return {key[len(KEY_PREFIX):]: value for key, value in ret.items()}

def safe_cache_get_many(keys: List[str], cache_name: Optional[str]=None) -> Dict[str, Any]:
"""Variant of cache_get_many that drops any keys that fail
@ -290,7 +290,7 @@ def safe_cache_set_many(items: Dict[str, Any], cache_name: Optional[str]=None,
good_keys, bad_keys = filter_good_and_bad_keys(list(items.keys()))
log_invalid_cache_keys(stack_trace, bad_keys)

good_items = dict((key, items[key]) for key in good_keys)
good_items = {key: items[key] for key in good_keys}
return cache_set_many(good_items, cache_name, timeout)

def cache_delete(key: str, cache_name: Optional[str]=None) -> None:
@ -404,8 +404,8 @@ def generic_bulk_cached_fetch(
cached_objects[key] = item
if len(items_for_remote_cache) > 0:
safe_cache_set_many(items_for_remote_cache)
return dict((object_id, cached_objects[cache_keys[object_id]]) for object_id in object_ids
if cache_keys[object_id] in cached_objects)
return {object_id: cached_objects[cache_keys[object_id]] for object_id in object_ids
if cache_keys[object_id] in cached_objects}

def preview_url_cache_key(url: str) -> str:
return "preview_url:%s" % (make_safe_digest(url),)
@ -425,7 +425,7 @@ def user_profile_by_email_cache_key(email: str) -> str:
return 'user_profile_by_email:%s' % (make_safe_digest(email.strip()),)

def user_profile_cache_key_id(email: str, realm_id: int) -> str:
return u"user_profile:%s:%s" % (make_safe_digest(email.strip()), realm_id,)
return "user_profile:%s:%s" % (make_safe_digest(email.strip()), realm_id,)

def user_profile_cache_key(email: str, realm: 'Realm') -> str:
return user_profile_cache_key_id(email, realm.id)
@ -451,7 +451,7 @@ def realm_user_dicts_cache_key(realm_id: int) -> str:
return "realm_user_dicts:%s" % (realm_id,)

def get_realm_used_upload_space_cache_key(realm: 'Realm') -> str:
return u'realm_used_upload_space:%s' % (realm.id,)
return 'realm_used_upload_space:%s' % (realm.id,)

def active_user_ids_cache_key(realm_id: int) -> str:
return "active_user_ids:%s" % (realm_id,)
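Note what did not change in these cache-key helpers: the %-format strings stay, because the commit ran pyupgrade with `--keep-percent-format`; only the redundant u prefixes were stripped here. A sketch with an illustrative ID:

    realm_id = 7
    key = 'realm_used_upload_space:%s' % (realm_id,)  # left as %-format
    assert key == u'realm_used_upload_space:7'        # the u prefix was the only change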
@ -74,9 +74,9 @@ def bulk_fetch_display_recipients(recipient_tuples: Set[Tuple[int, int, int]]
for recipient in recipient_tuples
}

stream_recipients = set(
stream_recipients = {
recipient for recipient in recipient_tuples if recipient[1] == Recipient.STREAM
)
}
personal_and_huddle_recipients = recipient_tuples - stream_recipients

def stream_query_function(recipient_ids: List[int]) -> List[TinyStreamResult]:
@ -309,7 +309,7 @@ def do_send_missedmessage_events_reply_in_zulip(user_profile: UserProfile,
if not user_profile.enable_offline_email_notifications:
return

recipients = set((msg['message'].recipient_id, msg['message'].topic_name()) for msg in missed_messages)
recipients = {(msg['message'].recipient_id, msg['message'].topic_name()) for msg in missed_messages}
if len(recipients) != 1:
raise ValueError(
'All missed_messages must have the same recipient and topic %r' %
@ -359,7 +359,7 @@ def do_send_missedmessage_events_reply_in_zulip(user_profile: UserProfile,
'narrow_url': narrow_url,
})

senders = list(set(m['message'].sender for m in missed_messages))
senders = list({m['message'].sender for m in missed_messages})
if (missed_messages[0]['message'].recipient.type == Recipient.HUDDLE):
display_recipient = get_display_recipient(missed_messages[0]['message'].recipient)
# Make sure that this is a list of strings, not a string.
@ -383,9 +383,9 @@ def do_send_missedmessage_events_reply_in_zulip(user_profile: UserProfile,
elif (context['mention'] or context['stream_email_notify']):
# Keep only the senders who actually mentioned the user
if context['mention']:
senders = list(set(m['message'].sender for m in missed_messages
senders = list({m['message'].sender for m in missed_messages
if m['trigger'] == 'mentioned' or
m['trigger'] == 'wildcard_mentioned'))
m['trigger'] == 'wildcard_mentioned'})
message = missed_messages[0]['message']
stream = Stream.objects.only('id', 'name').get(id=message.recipient.type_id)
stream_header = "%s > %s" % (stream.name, message.topic_name())
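Passing a generator to set() becomes a set comprehension here; wrapping the result in list(), as in the senders lines above, still deduplicates first. A sketch with stand-in IDs:

    sender_ids = [3, 1, 3, 2]
    senders = list({i for i in sender_ids})  # deduplicated; set order is arbitrary
    assert sorted(senders) == [1, 2, 3]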
@ -21,9 +21,9 @@ EMOTICON_CONVERSIONS = emoji_codes["emoticon_conversions"]
possible_emoticons = EMOTICON_CONVERSIONS.keys()
possible_emoticon_regexes = (re.escape(emoticon) for emoticon in possible_emoticons)
terminal_symbols = ',.;?!()\\[\\] "\'\\n\\t' # from composebox_typeahead.js
emoticon_regex = ('(?<![^{0}])(?P<emoticon>('.format(terminal_symbols)
emoticon_regex = ('(?<![^{}])(?P<emoticon>('.format(terminal_symbols)
+ ')|('.join(possible_emoticon_regexes)
+ '))(?![^{0}])'.format(terminal_symbols))
+ '))(?![^{}])'.format(terminal_symbols))

# Translates emoticons to their colon syntax, e.g. `:smiley:`.
def translate_emoticons(text: str) -> str:
@ -274,7 +274,7 @@ def sanity_check_output(data: TableData) -> None:
list(apps.get_app_config('two_factor').get_models(include_auto_created=True)) +
list(apps.get_app_config('zerver').get_models(include_auto_created=True))
)
all_tables_db = set(model._meta.db_table for model in target_models)
all_tables_db = {model._meta.db_table for model in target_models}

# These assertion statements will fire when we add a new database
# table that is not included in Zulip's data exports. Generally,
@ -783,9 +783,9 @@ def sanity_check_stream_data(response: TableData, config: Config, context: Conte
# complex to have a sanity check.
return

actual_streams = set([stream.name for stream in Stream.objects.filter(
realm=response["zerver_realm"][0]['id'])])
streams_in_response = set([stream['name'] for stream in response['zerver_stream']])
actual_streams = {stream.name for stream in Stream.objects.filter(
realm=response["zerver_realm"][0]['id'])}
streams_in_response = {stream['name'] for stream in response['zerver_stream']}

if len(streams_in_response - actual_streams) > 0:
print("Error: Streams not present in the realm were exported:")
@ -893,12 +893,12 @@ def fetch_huddle_objects(response: TableData, config: Config, context: Context)
realm = context['realm']
assert config.parent is not None
assert config.parent.table is not None
user_profile_ids = set(r['id'] for r in response[config.parent.table])
user_profile_ids = {r['id'] for r in response[config.parent.table]}

# First we get all huddles involving someone in the realm.
realm_huddle_subs = Subscription.objects.select_related("recipient").filter(
recipient__type=Recipient.HUDDLE, user_profile__in=user_profile_ids)
realm_huddle_recipient_ids = set(sub.recipient_id for sub in realm_huddle_subs)
realm_huddle_recipient_ids = {sub.recipient_id for sub in realm_huddle_subs}

# Mark all Huddles whose recipient ID contains a cross-realm user.
unsafe_huddle_recipient_ids = set()
@ -914,8 +914,8 @@ def fetch_huddle_objects(response: TableData, config: Config, context: Context)
# exporting the users from this realm), at the cost of losing
# some of these cross-realm messages.
huddle_subs = [sub for sub in realm_huddle_subs if sub.recipient_id not in unsafe_huddle_recipient_ids]
huddle_recipient_ids = set(sub.recipient_id for sub in huddle_subs)
huddle_ids = set(sub.recipient.type_id for sub in huddle_subs)
huddle_recipient_ids = {sub.recipient_id for sub in huddle_subs}
huddle_ids = {sub.recipient.type_id for sub in huddle_subs}

huddle_subscription_dicts = make_raw(huddle_subs)
huddle_recipients = make_raw(Recipient.objects.filter(id__in=huddle_recipient_ids))
@ -953,7 +953,7 @@ def export_usermessages_batch(input_path: Path, output_path: Path,
batch of Message objects and adds the corresponding UserMessage
objects. (This is called by the export_usermessage_batch
management command)."""
with open(input_path, "r") as input_file:
with open(input_path) as input_file:
output = ujson.load(input_file)
message_ids = [item['id'] for item in output['zerver_message']]
user_profile_ids = set(output['zerver_userprofile_ids'])
@ -979,7 +979,7 @@ def export_partial_message_files(realm: Realm,
output_dir = tempfile.mkdtemp(prefix="zulip-export")

def get_ids(records: List[Record]) -> Set[int]:
return set(x['id'] for x in records)
return {x['id'] for x in records}

# Basic security rule: You can export everything either...
# - sent by someone in your exportable_user_ids
@ -1095,7 +1095,7 @@ def write_message_partial_for_query(realm: Realm, message_query: Any, dump_file_
while True:
actual_query = message_query.filter(id__gt=min_id)[0:chunk_size]
message_chunk = make_raw(actual_query)
message_ids = set(m['id'] for m in message_chunk)
message_ids = {m['id'] for m in message_chunk}
assert len(message_ids.intersection(all_message_ids)) == 0

all_message_ids.update(message_ids)
@ -1431,7 +1431,7 @@ def do_write_stats_file_for_realm_export(output_dir: Path) -> None:
with open(stats_file, 'w') as f:
for fn in fns:
f.write(os.path.basename(fn) + '\n')
with open(fn, 'r') as filename:
with open(fn) as filename:
data = ujson.load(filename)
for k in sorted(data):
f.write('%5d %s\n' % (len(data[k]), k))
@ -1442,7 +1442,7 @@ def do_write_stats_file_for_realm_export(output_dir: Path) -> None:

for fn in [avatar_file, uploads_file]:
f.write(fn+'\n')
with open(fn, 'r') as filename:
with open(fn) as filename:
data = ujson.load(filename)
f.write('%5d records\n' % (len(data),))
f.write('\n')
@ -1652,7 +1652,7 @@ def export_messages_single_user(user_profile: UserProfile, output_dir: Path,
actual_query = user_message_query.select_related(
"message", "message__sending_client").filter(id__gt=min_id)[0:chunk_size]
user_message_chunk = [um for um in actual_query]
user_message_ids = set(um.id for um in user_message_chunk)
user_message_ids = {um.id for um in user_message_chunk}

if len(user_message_chunk) == 0:
break
@ -7,7 +7,7 @@ import os
from scripts.lib.zulip_tools import get_or_create_dev_uuid_var_path

def load_config() -> Dict[str, Any]:
with open("zerver/tests/fixtures/config.generate_data.json", "r") as infile:
with open("zerver/tests/fixtures/config.generate_data.json") as infile:
config = ujson.load(infile)

return config
@ -47,7 +47,7 @@ def parse_file(config: Dict[str, Any], gens: Dict[str, Any], corpus_file: str) -

paragraphs = [] # type: List[str]

with open(corpus_file, "r") as infile:
with open(corpus_file) as infile:
# OUR DATA: we need to separate the person talking and what they say
paragraphs = remove_line_breaks(infile)
paragraphs = add_flair(paragraphs, gens)
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
import operator

from django.conf import settings
@ -27,7 +26,7 @@ def with_language(string: str, language: str) -> str:
@lru_cache()
def get_language_list() -> List[Dict[str, Any]]:
path = os.path.join(settings.DEPLOY_ROOT, 'locale', 'language_name_map.json')
with open(path, 'r') as reader:
with open(path) as reader:
languages = ujson.load(reader)
return languages['name_map']

@ -90,7 +89,7 @@ def get_language_translation_data(language: str) -> Dict[str, str]:
language = 'id_ID'
path = os.path.join(settings.DEPLOY_ROOT, 'locale', language, 'translations.json')
try:
with open(path, 'r') as reader:
with open(path) as reader:
return ujson.load(reader)
except FileNotFoundError:
print('Translation for {} not found at {}'.format(language, path))
@ -112,7 +112,7 @@ You can use the command list_realms to find ID of the realms in this server."""

if options["users"] is None:
return []
emails = set([email.strip() for email in options["users"].split(",")])
emails = {email.strip() for email in options["users"].split(",")}
user_profiles = []
for email in emails:
user_profiles.append(self.get_user(email, realm))
@ -505,13 +505,13 @@ class MessageDict:

@staticmethod
def bulk_hydrate_recipient_info(objs: List[Dict[str, Any]]) -> None:
recipient_tuples = set( # We use set to eliminate duplicate tuples.
recipient_tuples = { # We use set to eliminate duplicate tuples.
(
obj['recipient_id'],
obj['recipient_type'],
obj['recipient_type_id']
) for obj in objs
)
}
display_recipients = bulk_fetch_display_recipients(recipient_tuples)

for obj in objs:
@ -14,7 +14,7 @@ def read_stop_words() -> List[str]:
global stop_words_list
if stop_words_list is None:
file_path = os.path.join(settings.DEPLOY_ROOT, "puppet/zulip/files/postgresql/zulip_english.stop")
with open(file_path, 'r') as f:
with open(file_path) as f:
stop_words_list = f.read().splitlines()

return stop_words_list
Some files were not shown because too many files have changed in this diff.