mirror of https://github.com/zulip/zulip.git
Commit split by tabbott; this has changes to scripts/, tools/, and puppet/. scripts/lib/hash_reqs.py, scripts/lib/setup_venv.py, scripts/lib/zulip_tools.py, and tools/lib/provision.py are excluded so tools/provision still gives the right error message on Ubuntu 16.04 with Python 3.5. Generated by com2ann, with whitespace fixes and various manual fixes for runtime issues: -shebang_rules: List[Rule] = [ +shebang_rules: List["Rule"] = [ -trailing_whitespace_rule: Rule = { +trailing_whitespace_rule: "Rule" = { -whitespace_rules: List[Rule] = [ +whitespace_rules: List["Rule"] = [ -comma_whitespace_rule: List[Rule] = [ +comma_whitespace_rule: List["Rule"] = [ -prose_style_rules: List[Rule] = [ +prose_style_rules: List["Rule"] = [ -html_rules: List[Rule] = whitespace_rules + prose_style_rules + [ +html_rules: List["Rule"] = whitespace_rules + prose_style_rules + [ - target_port: int = None + target_port: int Signed-off-by: Anders Kaseorg <anders@zulipchat.com>
parent ad07814fa4
commit f8339f019d
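For context on the diff below: com2ann rewrites PEP 484 type comments into PEP 526 variable annotations. A minimal sketch of the three patterns that recur throughout (illustrative names, not lines from this commit):

    from typing import List

    # Old style: the type lives in a comment; only type checkers see it.
    msg_content = []  # type: List[str]

    # New style: the annotation is real syntax and is recorded in
    # __annotations__ at runtime.
    msg_content: List[str] = []

    # Bare annotation: declares a type without binding a value (used for
    # target_port below, replacing an ill-typed `= None` default).
    target_port: int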
@@ -155,7 +155,7 @@ send_zulip(zulip_sender, {
     "to": recipient.email,
 })
 
-msg_content = []  # type: List[str]
+msg_content: List[str] = []
 
 while msg_to_send not in msg_content:
     messages = get_zulips()
@@ -12,14 +12,14 @@ from typing import Dict
 import os
 import time
 
-RESULTS_DIR = "/home/zulip/mirror_status"  # type: str
+RESULTS_DIR: str = "/home/zulip/mirror_status"
 
-states = {
+states: Dict[str, int] = {
     "OK": 0,
     "WARNING": 1,
     "CRITICAL": 2,
     "UNKNOWN": 3
-}  # type: Dict[str, int]
+}
 
 def report(state: str, output: str) -> None:
     print("%s\n%s" % (state, output))
@@ -28,12 +28,12 @@ from zerver.models import UserActivity
 
 from typing import Any, Dict, Set, Optional
 
-states = {
+states: Dict[str, int] = {
    "OK": 0,
    "WARNING": 1,
    "CRITICAL": 2,
    "UNKNOWN": 3
-}  # type: Dict[str, int]
+}
 
 def report(state: str, short_msg: str, too_old: Optional[Set[Any]] = None) -> None:
     too_old_data = ""
@@ -15,12 +15,12 @@ import time
 
 RESULTS_FILE = "/var/lib/nagios_state/check-mirroring-results"
 
-states = {
+states: Dict[str, int] = {
     "OK": 0,
     "WARNING": 1,
     "CRITICAL": 2,
     "UNKNOWN": 3
-}  # type: Dict[str, int]
+}
 
 def report(state: str, data: str, last_check: float) -> None:
     print("%s: Last test run completed at %s\n%s" % (
@@ -40,28 +40,28 @@ states = {
     3: "UNKNOWN"
 }
 
-MAX_SECONDS_TO_CLEAR_FOR_BURSTS = defaultdict(
+MAX_SECONDS_TO_CLEAR_FOR_BURSTS: DefaultDict[str, int] = defaultdict(
     lambda: 120,
     digest_emails=600,
     slow_queries=600,
-)  # type: DefaultDict[str, int]
+)
-MAX_SECONDS_TO_CLEAR_NORMAL = defaultdict(
+MAX_SECONDS_TO_CLEAR_NORMAL: DefaultDict[str, int] = defaultdict(
     lambda: 30,
     digest_emails=1200,
     slow_queries=120,
     missedmessage_mobile_notifications=120,
-)  # type: DefaultDict[str, int]
+)
-CRITICAL_SECONDS_TO_CLEAR_FOR_BURSTS = defaultdict(
+CRITICAL_SECONDS_TO_CLEAR_FOR_BURSTS: DefaultDict[str, int] = defaultdict(
     lambda: 240,
     digest_emails=1200,
     slow_queries=1200,
-)  # type: DefaultDict[str, int]
+)
-CRITICAL_SECONDS_TO_CLEAR_NORMAL = defaultdict(
+CRITICAL_SECONDS_TO_CLEAR_NORMAL: DefaultDict[str, int] = defaultdict(
     lambda: 60,
     missedmessage_mobile_notifications=180,
     digest_emails=600,
     slow_queries=600,
-)  # type: DefaultDict[str, int]
+)
 
 def analyze_queue_stats(queue_name: str, stats: Dict[str, Any],
                         queue_count_rabbitmqctl: int) -> Dict[str, Any]:
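A note on the DefaultDict hunk above: moving the type comment onto the assignment target keeps the abstract type visible to mypy while defaultdict supplies the runtime fallback for unlisted queues. A minimal sketch of the pattern (hypothetical values, not the real thresholds):

    from collections import defaultdict
    from typing import DefaultDict

    max_seconds: DefaultDict[str, int] = defaultdict(
        lambda: 120,        # fallback for any queue not named below
        digest_emails=600,
    )
    assert max_seconds['digest_emails'] == 600
    assert max_seconds['unlisted_queue'] == 120   # factory fills in the default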
@@ -183,7 +183,7 @@ def check_rabbitmq_queues() -> None:
     queue_stats_dir = subprocess.check_output([os.path.join(ZULIP_PATH, 'scripts/get-django-setting'),
                                                'QUEUE_STATS_DIR'],
                                               universal_newlines=True).strip()
-    queue_stats = dict()  # type: Dict[str, Dict[str, Any]]
+    queue_stats: Dict[str, Dict[str, Any]] = dict()
     queues_to_check = set(normal_queues).intersection(set(queues_with_consumers))
     for queue in queues_to_check:
         fn = queue + ".stats"
@@ -15,7 +15,7 @@ from scripts.lib.zulip_tools import DEPLOYMENTS_DIR, FAIL, ENDC, \
     su_to_zulip, get_deployment_lock, release_deployment_lock, assert_running_as_root, \
     get_config_file, get_deploy_options
 
-config_file = get_config_file()  # type: configparser.RawConfigParser
+config_file: configparser.RawConfigParser = get_config_file()
 deploy_options = get_deploy_options(config_file)
 
 assert_running_as_root(strip_lib_from_paths=True)
@@ -46,7 +46,7 @@ TORNADO_PROCESSES = int(get_config('application_server', 'tornado_processes', '1
 output = subprocess.check_output(['/usr/sbin/rabbitmqctl', 'list_consumers'],
                                  universal_newlines=True)
 
-consumers = defaultdict(int)  # type: Dict[str, int]
+consumers: Dict[str, int] = defaultdict(int)
 
 sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
 queues = set(normal_queues).union({
@@ -52,7 +52,7 @@ def generate_secrets(development: bool = False) -> None:
     OUTPUT_SETTINGS_FILENAME = "/etc/zulip/zulip-secrets.conf"
     current_conf = get_old_conf(OUTPUT_SETTINGS_FILENAME)
 
-    lines = []  # type: List[str]
+    lines: List[str] = []
     if len(current_conf) == 0:
         lines = ['[secrets]\n']
 
@@ -57,7 +57,7 @@ def check_issue_labels() -> None:
               "your api token. If you want to continue without using a token use --force.")
         sys.exit(1)
 
-    next_page_url = 'https://api.github.com/repos/zulip/zulip/issues'  # type: Optional[str]
+    next_page_url: Optional[str] = 'https://api.github.com/repos/zulip/zulip/issues'
     unlabeled_issue_urls = []
     while next_page_url:
         try:
@@ -96,7 +96,7 @@ def check_html_templates(templates: Iterable[str], all_dups: bool, fix: bool) ->
                 print(fn)
         return bad_ids_dict
 
-    bad_ids_list = []  # type: List[str]
+    bad_ids_list: List[str] = []
     archive_templates = list(filter(
         lambda fn: ('templates/zerver/archive' in fn),
         templates))
@@ -109,7 +109,7 @@ def create_user_docs() -> None:
     for line in open(fn):
         calls.append(ujson.loads(line))
 
-    pattern_dict = defaultdict(list)  # type: Dict[str, List[Call]]
+    pattern_dict: Dict[str, List[Call]] = defaultdict(list)
     for call in calls:
         if 'pattern' in call:
             pattern = clean_up_pattern(call['pattern'])
@@ -124,7 +124,7 @@ def create_user_docs() -> None:
         ('json', 'legacy'),
     ]
 
-    groups = dict()  # type: Dict[str, Set[str]]
+    groups: Dict[str, Set[str]] = dict()
     for prefix, name in tups:
         groups[name] = {p for p in patterns if p.startswith(prefix)}
         patterns -= groups[name]
@@ -20,8 +20,8 @@ class UnusedImagesLinterSpider(BaseDocumentationSpider):
 
     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)
-        self.static_images = set()  # type: Set[str]
-        self.images_static_dir = get_images_dir(self.images_path)  # type: str
+        self.static_images: Set[str] = set()
+        self.images_static_dir: str = get_images_dir(self.images_path)
 
     def _is_external_url(self, url: str) -> bool:
         is_external = url.startswith('http') and self.start_urls[0] not in url
@@ -43,7 +43,7 @@ class UnusedImagesLinterSpider(BaseDocumentationSpider):
 class HelpDocumentationSpider(UnusedImagesLinterSpider):
     name = "help_documentation_crawler"
     start_urls = ['http://localhost:9981/help']
-    deny_domains = []  # type: List[str]
+    deny_domains: List[str] = []
     deny = ['/privacy']
     images_path = "static/images/help"
 
@@ -51,7 +51,7 @@ class HelpDocumentationSpider(UnusedImagesLinterSpider):
 class APIDocumentationSpider(UnusedImagesLinterSpider):
     name = 'api_documentation_crawler'
     start_urls = ['http://localhost:9981/api']
-    deny_domains = []  # type: List[str]
+    deny_domains: List[str] = []
     images_path = "static/images/api"
 
 class PorticoDocumentationSpider(BaseDocumentationSpider):
@@ -79,4 +79,4 @@ class PorticoDocumentationSpider(BaseDocumentationSpider):
                   'http://localhost:9981/for/working-groups-and-communities',
                   'http://localhost:9981/for/mystery-hunt',
                   'http://localhost:9981/security']
-    deny_domains = []  # type: List[str]
+    deny_domains: List[str] = []
@@ -44,12 +44,12 @@ VNU_IGNORE = re.compile(r'|'.join([
 
 
 class BaseDocumentationSpider(scrapy.Spider):
-    name = None  # type: Optional[str]
+    name: Optional[str] = None
     # Exclude domain address.
-    deny_domains = []  # type: List[str]
-    start_urls = []  # type: List[str]
-    deny = []  # type: List[str]
-    file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS]  # type: List[str]
+    deny_domains: List[str] = []
+    start_urls: List[str] = []
+    deny: List[str] = []
+    file_extensions: List[str] = ['.' + ext for ext in IGNORED_EXTENSIONS]
     tags = ('a', 'area', 'img')
     attrs = ('href', 'src')
 
@@ -107,7 +107,7 @@ class BaseDocumentationSpider(scrapy.Spider):
         return callback
 
     def _make_requests(self, url: str) -> Iterable[Request]:
-        callback = self.parse  # type: Callable[[Response], Optional[Iterable[Request]]]
+        callback: Callable[[Response], Optional[Iterable[Request]]] = self.parse
         dont_filter = False
         method = 'GET'
         if self._is_external_url(url):
@@ -44,7 +44,7 @@ ContributorsJSON = TypedDict('ContributorsJSON', {
 logger = logging.getLogger('zulip.fetch_contributors_json')
 
 def fetch_contributors(repo_link: str) -> Optional[List[Dict[str, Dict[str, Any]]]]:
-    r = requests.get(repo_link, verify=os.environ.get('CUSTOM_CA_CERTIFICATES'))  # type: requests.Response
+    r: requests.Response = requests.get(repo_link, verify=os.environ.get('CUSTOM_CA_CERTIFICATES'))
     return r.json() if r.status_code == 200 else None
 
 def write_to_disk(json_data: ContributorsJSON, out_file: str) -> None:
@@ -77,8 +77,8 @@ def update_contributor_data_file() -> None:
         'zulip-android': 'https://api.github.com/repos/zulip/zulip-android/stats/contributors',
     }
 
-    data = dict(date=str(date.today()), contrib=[])  # type: ContributorsJSON
-    contribs_list = {}  # type: Dict[str, Dict[str, Union[str, int]]]
+    data: ContributorsJSON = dict(date=str(date.today()), contrib=[])
+    contribs_list: Dict[str, Dict[str, Union[str, int]]] = {}
     retry_attempts = 0
 
     while True:
@@ -28,7 +28,7 @@ def get_translation_stats(resource_path: str) -> Dict[str, int]:
     not_translated = len([i for i in raw_info.items() if i[1] == ''])
     return {'total': total, 'not_translated': not_translated}
 
-translation_stats = {}  # type: Dict[str, Dict[str, int]]
+translation_stats: Dict[str, Dict[str, int]] = {}
 locale_paths = []  # List[str]
 for locale in get_locales():
     path = get_json_filename(locale)
@@ -26,7 +26,7 @@ class HtmlTreeBranch:
         self.fn = fn
         self.line = tags[-1].token.line
 
-        self.words = set()  # type: Set[str]
+        self.words: Set[str] = set()
         for tag in tags:
             for word in tag.words:
                 self.words.add(word)
@@ -60,8 +60,8 @@ class Node:
     def __init__(self, token: Token, parent: "Optional[Node]") -> None:
         # FIXME parent parameter is not used!
         self.token = token
-        self.children = []  # type: List[Node]
-        self.parent = None  # type: Optional[Node]
+        self.children: List[Node] = []
+        self.parent: Optional[Node] = None
 
 
 class TagInfo:
@@ -87,8 +87,8 @@ class TagInfo:
 def get_tag_info(token: Token) -> TagInfo:
     s = token.s
     tag = token.tag
-    classes = []  # type: List[str]
-    ids = []  # type: List[str]
+    classes: List[str] = []
+    ids: List[str] = []
 
     searches = [
         (classes, ' class="(.*?)"'),
@@ -111,7 +111,7 @@ def split_for_id_and_class(element: str) -> List[str]:
     # attributes from HTML tags. This also takes care of template variables
     # in string during splitting process. For eg. 'red black {{ a|b|c }}'
     # is split as ['red', 'black', '{{ a|b|c }}']
-    outside_braces = True  # type: bool
+    outside_braces: bool = True
     lst = []
     s = ''
 
@@ -134,7 +134,7 @@ def split_for_id_and_class(element: str) -> List[str]:
 
 def html_branches(text: str, fn: Optional[str] = None) -> List[HtmlTreeBranch]:
     tree = html_tag_tree(text)
-    branches = []  # type: List[HtmlTreeBranch]
+    branches: List[HtmlTreeBranch] = []
 
     def walk(node: Node, tag_info_list: Optional[List[TagInfo]] = None) -> None:
         info = get_tag_info(node.token)
@@ -179,7 +179,7 @@ def html_tag_tree(text: str) -> Node:
 
 
 def build_id_dict(templates: List[str]) -> (Dict[str, List[str]]):
-    template_id_dict = defaultdict(list)  # type: (Dict[str, List[str]])
+    template_id_dict: (Dict[str, List[str]]) = defaultdict(list)
 
     for fn in templates:
         with open(fn) as f:
@@ -17,23 +17,24 @@ def pretty_print_html(html: str, num_spaces: int = 4) -> str:
     # We will keep a stack of "start" tags so that we know
     # when HTML ranges end. Note that some start tags won't
     # be blocks from an indentation standpoint.
-    stack = []  # type: List[Dict[str, Any]]
+    stack: List[Dict[str, Any]] = []
 
     # Seed our stack with a pseudo entry to make depth calculations
     # easier.
-    info = dict(
+    info: Dict[str, Any] = dict(
         block=False,
         depth=-1,
         line=-1,
         token_kind='html_start',
         tag='html',
         extra_indent=0,
-        ignore_lines=[])  # type: Dict[str, Any]
+        ignore_lines=[],
+    )
     stack.append(info)
 
     # Our main job is to figure out offsets that we use to nudge lines
     # over by.
-    offsets = {}  # type: Dict[int, int]
+    offsets: Dict[int, int] = {}
 
     # Loop through our start/end tokens, and calculate offsets. As
     # we proceed, we will push/pop info dictionaries on/off a stack.
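The `info` change above is one of the manual fixes noted in the commit message: a type comment attached to the closing line of a multi-line call has no direct annotation equivalent, so the annotation moves to the assignment target and the call gains a trailing comma and its own closing line (which is why this hunk grows from 23 to 24 lines). A sketch of the transformation, with hypothetical fields:

    from typing import Any, Dict

    # Before:
    #     info = dict(
    #         block=False,
    #         ignore_lines=[])  # type: Dict[str, Any]

    # After: annotate the target; the value keeps its multi-line layout.
    info: Dict[str, Any] = dict(
        block=False,
        ignore_lines=[],
    )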
@@ -51,7 +51,7 @@ FILES_WITH_LEGACY_SUBJECT = {
     'zerver/tests/test_narrow.py',
 }
 
-shebang_rules = [
+shebang_rules: List["Rule"] = [
     {'pattern': '^#!',
      'description': "zerver library code shouldn't have a shebang line.",
      'include_only': {'zerver/'}},
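The quoting in `List["Rule"]` above (and in the commit message) is a runtime fix rather than a style choice: unlike a type comment, a module-level variable annotation is evaluated when the line executes, and `Rule` is not yet bound at this point in the linter config. A string annotation defers the name lookup to the type checker. A minimal sketch of the failure mode, in a hypothetical module where `Rule` is defined further down:

    from typing import Any, Dict, List

    # rules: List[Rule] = []   # NameError at import time: Rule is not bound yet
    rules: List["Rule"] = []   # fine: the quoted form is never evaluated here

    Rule = Dict[str, Any]      # the alias appears later in the module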
@@ -63,14 +63,14 @@ shebang_rules = [
      " for interpreters other than sh."},
     {'pattern': '^#!/usr/bin/env python$',
      'description': "Use `#!/usr/bin/env python3` instead of `#!/usr/bin/env python`."}
-]  # type: List[Rule]
+]
 
-trailing_whitespace_rule = {
+trailing_whitespace_rule: "Rule" = {
     'pattern': r'\s+$',
     'strip': '\n',
     'description': 'Fix trailing whitespace'
-}  # type: Rule
+}
-whitespace_rules = [
+whitespace_rules: List["Rule"] = [
     # This linter should be first since bash_rules depends on it.
     trailing_whitespace_rule,
     {'pattern': 'http://zulip.readthedocs.io',
@@ -80,14 +80,14 @@ whitespace_rules = [
      'strip': '\n',
      'exclude': {'tools/ci/success-http-headers.txt'},
      'description': 'Fix tab-based whitespace'},
-]  # type: List[Rule]
+]
-comma_whitespace_rule = [
+comma_whitespace_rule: List["Rule"] = [
     {'pattern': ', {2,}[^#/ ]',
      'exclude': {'zerver/tests', 'frontend_tests/node_tests', 'corporate/tests'},
      'description': "Remove multiple whitespaces after ','",
      'good_lines': ['foo(1, 2, 3)', 'foo = bar  # some inline comment'],
      'bad_lines': ['foo(1,  2, 3)', 'foo(1, 2,  3)']},
-]  # type: List[Rule]
+]
 markdown_whitespace_rules = list([rule for rule in whitespace_rules if rule['pattern'] != r'\s+$']) + [
     # Two spaces trailing a line with other content is okay--it's a markdown line break.
     # This rule finds one space trailing a non-space, three or more trailing spaces, and
@@ -560,7 +560,7 @@ css_rules = RuleList(
     ],
 )
 
-prose_style_rules = [
+prose_style_rules: List["Rule"] = [
     {'pattern': r'[^\/\#\-"]([jJ]avascript)',  # exclude usage in hrefs/divs
      'exclude': {"docs/documentation/api.md"},
      'description': "javascript should be spelled JavaScript"},
@@ -576,8 +576,8 @@ prose_style_rules = [
     {'pattern': '[^-_p]botserver(?!rc)|bot server',
      'description': "Use Botserver instead of botserver or bot server."},
     *comma_whitespace_rule,
-]  # type: List[Rule]
+]
-html_rules = whitespace_rules + prose_style_rules + [
+html_rules: List["Rule"] = whitespace_rules + prose_style_rules + [
     {'pattern': 'subject|SUBJECT',
      'exclude': {'templates/zerver/email.html'},
      'exclude_pattern': 'email subject',
@@ -703,7 +703,7 @@ html_rules = whitespace_rules + prose_style_rules + [
      },
      'good_lines': ['#my-style {color: blue;}', 'style="display: none"', "style='display: none"],
      'bad_lines': ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"']},
-]  # type: List[Rule]
+]
 
 handlebars_rules = RuleList(
     langs=['hbs'],
@@ -19,7 +19,7 @@ def validate_order(order: List[int], length: int) -> None:
         sys.exit(1)
 
 def renumber_migration(conflicts: List[str], order: List[int], last_correct_migration: str) -> None:
-    stack = []  # type: List[str]
+    stack: List[str] = []
     for i in order:
         if conflicts[i-1][0:4] not in stack:
             stack.append(conflicts[i-1][0:4])
@@ -53,8 +53,8 @@ def resolve_conflicts(conflicts: List[str], files_list: List[str]) -> None:
 if __name__ == '__main__':
 
     while True:
-        conflicts = []  # type: List[str]
-        stack = []  # type: List[str]
+        conflicts: List[str] = []
+        stack: List[str] = []
         files_list = [os.path.basename(path) for path in glob.glob("zerver/migrations/????_*.py")]
         file_index = [file[0:4] for file in files_list]
 
@@ -84,7 +84,7 @@ if options.interface is None:
 elif options.interface == "":
     options.interface = None
 
-runserver_args = []  # type: List[str]
+runserver_args: List[str] = []
 base_port = 9991
 if options.test:
     base_port = 9981
@@ -199,9 +199,9 @@ def fetch_request(url: str, callback: Any, **kwargs: Any) -> "Generator[Callable
 
 class BaseHandler(web.RequestHandler):
     # target server ip
-    target_host = '127.0.0.1'  # type: str
+    target_host: str = '127.0.0.1'
     # target server port
-    target_port = None  # type: int
+    target_port: int
 
     def _add_request_headers(
         self, exclude_lower_headers_list: Optional[List[str]] = None
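The `target_port` change above is the other manual runtime fix called out in the commit message: `target_port: int = None` is ill-typed (None is not an int), so the conversion uses a bare annotation, which declares the attribute for the type checker without binding a class-level value. A small sketch of the semantics (hypothetical class, not run-dev.py itself):

    class Handler:
        target_host: str = '127.0.0.1'  # annotated and bound on the class
        target_port: int                # declared only; no class attribute is created

    h = Handler()
    h.target_port = 9993   # must be assigned before first read; reading it
                           # earlier would raise AttributeError
    assert Handler.__annotations__['target_port'] is int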
@@ -69,7 +69,7 @@ if not python_files and not pyi_files:
     print("There are no files to run mypy on.")
     sys.exit(0)
 
-mypy_args = []  # type: List[str]
+mypy_args: List[str] = []
 if args.quiet:
     mypy_args += ["--no-error-summary"]
 mypy_args += ["--"] + python_files + pyi_files
@@ -1,6 +1,6 @@
 from typing import Any, Dict
 
-EMOJI_NAME_MAPS = {
+EMOJI_NAME_MAPS: Dict[str, Dict[str, Any]] = {
     # seems like best emoji for happy
     '1f600': {'canonical_name': 'grinning', 'aliases': ['happy']},
     '1f603': {'canonical_name': 'smiley', 'aliases': []},
@@ -1528,4 +1528,4 @@ EMOJI_NAME_MAPS = {
     '1f6a9': {'canonical_name': 'triangular_flag', 'aliases': []},
     # solidarity from iemoji
     '1f38c': {'canonical_name': 'crossed_flags', 'aliases': ['solidarity']},
-}  # type: Dict[str, Dict[str, Any]]
+}
@@ -47,7 +47,7 @@ EMOTICON_CONVERSIONS = {
 }
 
 def emoji_names_for_picker(emoji_name_maps: Dict[str, Dict[str, Any]]) -> List[str]:
-    emoji_names = []  # type: List[str]
+    emoji_names: List[str] = []
     for emoji_code, name_info in emoji_name_maps.items():
         emoji_names.append(name_info["canonical_name"])
         emoji_names.extend(name_info["aliases"])
@@ -73,8 +73,8 @@ def get_emoji_code(emoji_dict: Dict[str, Any]) -> str:
 # `emoji_data`.
 def generate_emoji_catalog(emoji_data: List[Dict[str, Any]],
                            emoji_name_maps: Dict[str, Dict[str, Any]]) -> Dict[str, List[str]]:
-    sort_order = {}  # type: Dict[str, int]
-    emoji_catalog = defaultdict(list)  # type: Dict[str, List[str]]
+    sort_order: Dict[str, int] = {}
+    emoji_catalog: Dict[str, List[str]] = defaultdict(list)
 
     for emoji_dict in emoji_data:
         emoji_code = get_emoji_code(emoji_dict)
@@ -100,7 +100,7 @@ def emoji_is_universal(emoji_dict: Dict[str, Any]) -> bool:
     return True
 
 def generate_codepoint_to_name_map(emoji_name_maps: Dict[str, Dict[str, Any]]) -> Dict[str, str]:
-    codepoint_to_name = {}  # type: Dict[str, str]
+    codepoint_to_name: Dict[str, str] = {}
     for emoji_code, name_info in emoji_name_maps.items():
         codepoint_to_name[emoji_code] = name_info["canonical_name"]
     return codepoint_to_name
@@ -21,7 +21,7 @@ ZULIP_PATH = os.path.dirname(TOOLS_DIR_PATH)
 # `emoji.json` file is same in all four emoji-datasource packages.
 EMOJI_DATA_PATH = os.path.join(ZULIP_PATH, 'node_modules', 'emoji-datasource-google', 'emoji.json')
 
-sorting_info = {}  # type: Dict[str, Any]
+sorting_info: Dict[str, Any] = {}
 column_names = [
     'Codepoint',
     'New sorting info',
@@ -47,7 +47,7 @@ name_entry_regex = re.compile(r"'(?P<emoji_code>[a-z0-9-]+)': "
 explanation_regex = re.compile(r" # (?P<explanation_line>[^\r\n\t]+)")
 
 def prepare_sorting_info() -> None:
-    emoji_data = []  # type: List[Dict[str, Any]]
+    emoji_data: List[Dict[str, Any]] = []
     with open(EMOJI_DATA_PATH) as fp:
         emoji_data = ujson.load(fp)
 
@@ -90,7 +90,7 @@ def main() -> None:
     args = parser.parse_args()
     prepare_sorting_info()
     output_data = [column_names, ]
-    explanation_lines = []  # type: List[str]
+    explanation_lines: List[str] = []
     with open(args.input_file_path) as fp:
         for line in fp.readlines():
             match = name_entry_regex.search(line)
@@ -119,14 +119,14 @@ SORTED_CATEGORIES = [
     'Skin Tones',
 ]
 
-emoji_code_to_zulip_names = {}  # type: Dict[str, str]
-emoji_code_to_iamcal_names = {}  # type: Dict[str, str]
-emoji_code_to_gemoji_names = {}  # type: Dict[str, str]
-emoji_collection = {category: [] for category in SORTED_CATEGORIES}  # type: Dict[str, List[Dict[str, Any]]]
+emoji_code_to_zulip_names: Dict[str, str] = {}
+emoji_code_to_iamcal_names: Dict[str, str] = {}
+emoji_code_to_gemoji_names: Dict[str, str] = {}
+emoji_collection: Dict[str, List[Dict[str, Any]]] = {category: [] for category in SORTED_CATEGORIES}
 
 def generate_emoji_code_to_emoji_names_maps() -> None:
     # Prepare gemoji names map.
-    reverse_unified_reactions_map = {}  # type: Dict[str, List[str]]
+    reverse_unified_reactions_map: Dict[str, List[str]] = {}
     for name in UNIFIED_REACTIONS_MAP:
         emoji_code = UNIFIED_REACTIONS_MAP[name]
         if emoji_code in reverse_unified_reactions_map:
@@ -35,10 +35,10 @@ FILE_TEMPLATE = (
     "}  # type: Dict[str, Dict[str, Any]]\n"
 )
 
-emoji_names = set()  # type: Set[str]
+emoji_names: Set[str] = set()
 
 def load_data(data_file: str) -> List[List[str]]:
-    emoji_name_data = []  # type: List[List[str]]
+    emoji_name_data: List[List[str]] = []
     with open(data_file, newline='') as fp:
         data = csv.reader(fp)
         for row in data:
@@ -66,13 +66,13 @@ def prepare_explanation(explanation: str) -> str:
     if explanation == '':
         return ''
 
-    wrapper_config = {
+    wrapper_config: Dict[str, Any] = {
         'width': 80,
         'break_long_words': False,
         'break_on_hyphens': False,
         'initial_indent': '\n    # ',
         'subsequent_indent': '\n    # ',
-    }  # type: Dict[str, Any]
+    }
     wrapped_lines = textwrap.wrap(explanation.strip(), **wrapper_config)
     return ''.join(wrapped_lines)
 