python: Convert assignment type annotations to Python 3.6 style.

Commit split by tabbott; this has changes to scripts/, tools/, and
puppet/.

scripts/lib/hash_reqs.py, scripts/lib/setup_venv.py,
scripts/lib/zulip_tools.py, and tools/lib/provision.py are excluded so
tools/provision still gives the right error message on Ubuntu 16.04
with Python 3.5.
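PEP 526 annotated assignments are new syntax, not just new semantics: on Python 3.5 they are a SyntaxError at parse time, so a converted file could never reach its own version check. A minimal sketch of the two forms, using a hypothetical variable:

from typing import List

packages = []  # type: List[str]   # comment form: still parses under Python 3.5
packages: List[str] = []           # PEP 526 form: SyntaxError under Python 3.5

Keeping the excluded files in the comment form is what preserves the error path on Ubuntu 16.04.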

Generated by com2ann, with whitespace fixes and various manual fixes
for runtime issues:

-shebang_rules: List[Rule] = [
+shebang_rules: List["Rule"] = [

-trailing_whitespace_rule: Rule = {
+trailing_whitespace_rule: "Rule" = {

-whitespace_rules: List[Rule] = [
+whitespace_rules: List["Rule"] = [

-comma_whitespace_rule: List[Rule] = [
+comma_whitespace_rule: List["Rule"] = [

-prose_style_rules: List[Rule] = [
+prose_style_rules: List["Rule"] = [

-html_rules: List[Rule] = whitespace_rules + prose_style_rules + [
+html_rules: List["Rule"] = whitespace_rules + prose_style_rules + [

-    target_port: int = None
+    target_port: int
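The target_port fix addresses the other runtime wrinkle: a literal translation would be target_port: int = None, which keeps binding None to an attribute annotated int. A bare class-level annotation instead records the type without creating the attribute at all; roughly (a stripped-down sketch, base class omitted):

class BaseHandler:
    target_host: str = '127.0.0.1'  # ordinary class attribute with a value
    target_port: int                # annotation only; nothing is assigned

print(BaseHandler.__annotations__)
# {'target_host': <class 'str'>, 'target_port': <class 'int'>}
print(hasattr(BaseHandler, 'target_port'))
# False, until a subclass or instance actually assigns it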

Signed-off-by: Anders Kaseorg <anders@zulipchat.com>
Anders Kaseorg 2020-04-21 16:09:50 -07:00 committed by Tim Abbott
parent ad07814fa4
commit f8339f019d
26 changed files with 89 additions and 88 deletions


@@ -155,7 +155,7 @@ send_zulip(zulip_sender, {
     "to": recipient.email,
 })

-msg_content = [] # type: List[str]
+msg_content: List[str] = []
 while msg_to_send not in msg_content:
     messages = get_zulips()


@@ -12,14 +12,14 @@ from typing import Dict
 import os
 import time

-RESULTS_DIR = "/home/zulip/mirror_status" # type: str
+RESULTS_DIR: str = "/home/zulip/mirror_status"

-states = {
+states: Dict[str, int] = {
     "OK": 0,
     "WARNING": 1,
     "CRITICAL": 2,
     "UNKNOWN": 3
-} # type: Dict[str, int]
+}

 def report(state: str, output: str) -> None:
     print("%s\n%s" % (state, output))


@@ -28,12 +28,12 @@ from zerver.models import UserActivity
 from typing import Any, Dict, Set, Optional

-states = {
+states: Dict[str, int] = {
     "OK": 0,
     "WARNING": 1,
     "CRITICAL": 2,
     "UNKNOWN": 3
-} # type: Dict[str, int]
+}

 def report(state: str, short_msg: str, too_old: Optional[Set[Any]] = None) -> None:
     too_old_data = ""


@@ -15,12 +15,12 @@ import time

 RESULTS_FILE = "/var/lib/nagios_state/check-mirroring-results"

-states = {
+states: Dict[str, int] = {
     "OK": 0,
     "WARNING": 1,
     "CRITICAL": 2,
     "UNKNOWN": 3
-} # type: Dict[str, int]
+}

 def report(state: str, data: str, last_check: float) -> None:
     print("%s: Last test run completed at %s\n%s" % (


@@ -40,28 +40,28 @@ states = {
     3: "UNKNOWN"
 }

-MAX_SECONDS_TO_CLEAR_FOR_BURSTS = defaultdict(
+MAX_SECONDS_TO_CLEAR_FOR_BURSTS: DefaultDict[str, int] = defaultdict(
     lambda: 120,
     digest_emails=600,
     slow_queries=600,
-) # type: DefaultDict[str, int]
-MAX_SECONDS_TO_CLEAR_NORMAL = defaultdict(
+)
+MAX_SECONDS_TO_CLEAR_NORMAL: DefaultDict[str, int] = defaultdict(
     lambda: 30,
     digest_emails=1200,
     slow_queries=120,
     missedmessage_mobile_notifications=120,
-) # type: DefaultDict[str, int]
-CRITICAL_SECONDS_TO_CLEAR_FOR_BURSTS = defaultdict(
+)
+CRITICAL_SECONDS_TO_CLEAR_FOR_BURSTS: DefaultDict[str, int] = defaultdict(
     lambda: 240,
     digest_emails=1200,
     slow_queries=1200,
-) # type: DefaultDict[str, int]
-CRITICAL_SECONDS_TO_CLEAR_NORMAL = defaultdict(
+)
+CRITICAL_SECONDS_TO_CLEAR_NORMAL: DefaultDict[str, int] = defaultdict(
     lambda: 60,
     missedmessage_mobile_notifications=180,
     digest_emails=600,
     slow_queries=600,
-) # type: DefaultDict[str, int]
+)

 def analyze_queue_stats(queue_name: str, stats: Dict[str, Any],
                         queue_count_rabbitmqctl: int) -> Dict[str, Any]:
@@ -183,7 +183,7 @@ def check_rabbitmq_queues() -> None:
     queue_stats_dir = subprocess.check_output([os.path.join(ZULIP_PATH, 'scripts/get-django-setting'),
                                                'QUEUE_STATS_DIR'],
                                               universal_newlines=True).strip()
-    queue_stats = dict() # type: Dict[str, Dict[str, Any]]
+    queue_stats: Dict[str, Dict[str, Any]] = dict()
     queues_to_check = set(normal_queues).intersection(set(queues_with_consumers))
     for queue in queues_to_check:
         fn = queue + ".stats"
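One side effect visible in hunks like the ones above: with the comment form, a name like DefaultDict only had to exist for mypy, but a real module-level annotation is evaluated at import time, so the typing import must be a genuine runtime import. A small sketch with an illustrative name and values:

from collections import defaultdict
from typing import DefaultDict  # must be importable at runtime, not only for mypy

MAX_SECONDS: DefaultDict[str, int] = defaultdict(
    lambda: 120,        # factory: default for queues not listed below
    digest_emails=600,  # keyword arguments seed the initial entries
)
assert MAX_SECONDS['some_other_queue'] == 120
assert MAX_SECONDS['digest_emails'] == 600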


@@ -15,7 +15,7 @@ from scripts.lib.zulip_tools import DEPLOYMENTS_DIR, FAIL, ENDC, \
     su_to_zulip, get_deployment_lock, release_deployment_lock, assert_running_as_root, \
     get_config_file, get_deploy_options

-config_file = get_config_file() # type: configparser.RawConfigParser
+config_file: configparser.RawConfigParser = get_config_file()
 deploy_options = get_deploy_options(config_file)

 assert_running_as_root(strip_lib_from_paths=True)


@@ -46,7 +46,7 @@ TORNADO_PROCESSES = int(get_config('application_server', 'tornado_processes', '1
 output = subprocess.check_output(['/usr/sbin/rabbitmqctl', 'list_consumers'],
                                  universal_newlines=True)

-consumers = defaultdict(int) # type: Dict[str, int]
+consumers: Dict[str, int] = defaultdict(int)

 sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
 queues = set(normal_queues).union({


@@ -52,7 +52,7 @@ def generate_secrets(development: bool = False) -> None:
     OUTPUT_SETTINGS_FILENAME = "/etc/zulip/zulip-secrets.conf"
     current_conf = get_old_conf(OUTPUT_SETTINGS_FILENAME)

-    lines = [] # type: List[str]
+    lines: List[str] = []
     if len(current_conf) == 0:
         lines = ['[secrets]\n']


@@ -57,7 +57,7 @@ def check_issue_labels() -> None:
               "your api token. If you want to continue without using a token use --force.")
         sys.exit(1)

-    next_page_url = 'https://api.github.com/repos/zulip/zulip/issues' # type: Optional[str]
+    next_page_url: Optional[str] = 'https://api.github.com/repos/zulip/zulip/issues'
     unlabeled_issue_urls = []
     while next_page_url:
         try:


@@ -96,7 +96,7 @@ def check_html_templates(templates: Iterable[str], all_dups: bool, fix: bool) ->
             print(fn)
         return bad_ids_dict

-    bad_ids_list = [] # type: List[str]
+    bad_ids_list: List[str] = []
     archive_templates = list(filter(
         lambda fn: ('templates/zerver/archive' in fn),
         templates))


@@ -109,7 +109,7 @@ def create_user_docs() -> None:
     for line in open(fn):
         calls.append(ujson.loads(line))

-    pattern_dict = defaultdict(list) # type: Dict[str, List[Call]]
+    pattern_dict: Dict[str, List[Call]] = defaultdict(list)
     for call in calls:
         if 'pattern' in call:
             pattern = clean_up_pattern(call['pattern'])

@@ -124,7 +124,7 @@ def create_user_docs() -> None:
         ('json', 'legacy'),
     ]

-    groups = dict() # type: Dict[str, Set[str]]
+    groups: Dict[str, Set[str]] = dict()
     for prefix, name in tups:
         groups[name] = {p for p in patterns if p.startswith(prefix)}
         patterns -= groups[name]


@@ -20,8 +20,8 @@ class UnusedImagesLinterSpider(BaseDocumentationSpider):

     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)
-        self.static_images = set() # type: Set[str]
-        self.images_static_dir = get_images_dir(self.images_path) # type: str
+        self.static_images: Set[str] = set()
+        self.images_static_dir: str = get_images_dir(self.images_path)

     def _is_external_url(self, url: str) -> bool:
         is_external = url.startswith('http') and self.start_urls[0] not in url

@@ -43,7 +43,7 @@ class UnusedImagesLinterSpider(BaseDocumentationSpider):
 class HelpDocumentationSpider(UnusedImagesLinterSpider):
     name = "help_documentation_crawler"
     start_urls = ['http://localhost:9981/help']
-    deny_domains = [] # type: List[str]
+    deny_domains: List[str] = []
     deny = ['/privacy']
     images_path = "static/images/help"

@@ -51,7 +51,7 @@ class HelpDocumentationSpider(UnusedImagesLinterSpider):
 class APIDocumentationSpider(UnusedImagesLinterSpider):
     name = 'api_documentation_crawler'
     start_urls = ['http://localhost:9981/api']
-    deny_domains = [] # type: List[str]
+    deny_domains: List[str] = []
     images_path = "static/images/api"

 class PorticoDocumentationSpider(BaseDocumentationSpider):

@@ -79,4 +79,4 @@ class PorticoDocumentationSpider(BaseDocumentationSpider):
                   'http://localhost:9981/for/working-groups-and-communities',
                   'http://localhost:9981/for/mystery-hunt',
                   'http://localhost:9981/security']
-    deny_domains = [] # type: List[str]
+    deny_domains: List[str] = []


@@ -44,12 +44,12 @@ VNU_IGNORE = re.compile(r'|'.join([

 class BaseDocumentationSpider(scrapy.Spider):
-    name = None # type: Optional[str]
+    name: Optional[str] = None
     # Exclude domain address.
-    deny_domains = [] # type: List[str]
-    start_urls = [] # type: List[str]
-    deny = [] # type: List[str]
-    file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str]
+    deny_domains: List[str] = []
+    start_urls: List[str] = []
+    deny: List[str] = []
+    file_extensions: List[str] = ['.' + ext for ext in IGNORED_EXTENSIONS]
     tags = ('a', 'area', 'img')
     attrs = ('href', 'src')

@@ -107,7 +107,7 @@ class BaseDocumentationSpider(scrapy.Spider):
         return callback

     def _make_requests(self, url: str) -> Iterable[Request]:
-        callback = self.parse # type: Callable[[Response], Optional[Iterable[Request]]]
+        callback: Callable[[Response], Optional[Iterable[Request]]] = self.parse
         dont_filter = False
         method = 'GET'
         if self._is_external_url(url):
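The callback annotation above is a different case from the module- and class-level ones: it is local to a method, and PEP 526 local-variable annotations are never evaluated at runtime; they exist purely for the type checker. A toy demonstration (NoSuchType is deliberately undefined):

def demo() -> None:
    # Python never evaluates annotations on local variables, so an
    # undefined name here does not raise NameError at runtime.
    callback: NoSuchType = print  # noqa: F821  (a type checker would flag this)
    callback('local annotations are ignored at runtime')

demo()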


@@ -44,7 +44,7 @@ ContributorsJSON = TypedDict('ContributorsJSON', {
 logger = logging.getLogger('zulip.fetch_contributors_json')

 def fetch_contributors(repo_link: str) -> Optional[List[Dict[str, Dict[str, Any]]]]:
-    r = requests.get(repo_link, verify=os.environ.get('CUSTOM_CA_CERTIFICATES')) # type: requests.Response
+    r: requests.Response = requests.get(repo_link, verify=os.environ.get('CUSTOM_CA_CERTIFICATES'))
     return r.json() if r.status_code == 200 else None

 def write_to_disk(json_data: ContributorsJSON, out_file: str) -> None:

@@ -77,8 +77,8 @@ def update_contributor_data_file() -> None:
         'zulip-android': 'https://api.github.com/repos/zulip/zulip-android/stats/contributors',
     }

-    data = dict(date=str(date.today()), contrib=[]) # type: ContributorsJSON
-    contribs_list = {} # type: Dict[str, Dict[str, Union[str, int]]]
+    data: ContributorsJSON = dict(date=str(date.today()), contrib=[])
+    contribs_list: Dict[str, Dict[str, Union[str, int]]] = {}
     retry_attempts = 0

 while True:


@@ -28,7 +28,7 @@ def get_translation_stats(resource_path: str) -> Dict[str, int]:
     not_translated = len([i for i in raw_info.items() if i[1] == ''])
     return {'total': total, 'not_translated': not_translated}

-translation_stats = {} # type: Dict[str, Dict[str, int]]
+translation_stats: Dict[str, Dict[str, int]] = {}
 locale_paths = []  # List[str]
 for locale in get_locales():
     path = get_json_filename(locale)


@@ -26,7 +26,7 @@ class HtmlTreeBranch:
         self.fn = fn
         self.line = tags[-1].token.line

-        self.words = set() # type: Set[str]
+        self.words: Set[str] = set()
         for tag in tags:
             for word in tag.words:
                 self.words.add(word)

@@ -60,8 +60,8 @@ class Node:
     def __init__(self, token: Token, parent: "Optional[Node]") -> None:
         # FIXME parent parameter is not used!
         self.token = token
-        self.children = [] # type: List[Node]
-        self.parent = None # type: Optional[Node]
+        self.children: List[Node] = []
+        self.parent: Optional[Node] = None

 class TagInfo:

@@ -87,8 +87,8 @@ class TagInfo:
 def get_tag_info(token: Token) -> TagInfo:
     s = token.s
     tag = token.tag
-    classes = [] # type: List[str]
-    ids = [] # type: List[str]
+    classes: List[str] = []
+    ids: List[str] = []

     searches = [
         (classes, ' class="(.*?)"'),

@@ -111,7 +111,7 @@ def split_for_id_and_class(element: str) -> List[str]:
     # attributes from HTML tags. This also takes care of template variables
     # in string during splitting process. For eg. 'red black {{ a|b|c }}'
     # is split as ['red', 'black', '{{ a|b|c }}']
-    outside_braces = True # type: bool
+    outside_braces: bool = True
     lst = []
     s = ''

@@ -134,7 +134,7 @@ def split_for_id_and_class(element: str) -> List[str]:

 def html_branches(text: str, fn: Optional[str] = None) -> List[HtmlTreeBranch]:
     tree = html_tag_tree(text)
-    branches = [] # type: List[HtmlTreeBranch]
+    branches: List[HtmlTreeBranch] = []

     def walk(node: Node, tag_info_list: Optional[List[TagInfo]] = None) -> None:
         info = get_tag_info(node.token)

@@ -179,7 +179,7 @@ def html_tag_tree(text: str) -> Node:

 def build_id_dict(templates: List[str]) -> (Dict[str, List[str]]):
-    template_id_dict = defaultdict(list) # type: (Dict[str, List[str]])
+    template_id_dict: (Dict[str, List[str]]) = defaultdict(list)
     for fn in templates:
         with open(fn) as f:


@@ -17,23 +17,24 @@ def pretty_print_html(html: str, num_spaces: int = 4) -> str:
     # We will keep a stack of "start" tags so that we know
     # when HTML ranges end. Note that some start tags won't
     # be blocks from an indentation standpoint.
-    stack = [] # type: List[Dict[str, Any]]
+    stack: List[Dict[str, Any]] = []

     # Seed our stack with a pseudo entry to make depth calculations
     # easier.
-    info = dict(
+    info: Dict[str, Any] = dict(
         block=False,
         depth=-1,
         line=-1,
         token_kind='html_start',
         tag='html',
         extra_indent=0,
-        ignore_lines=[]) # type: Dict[str, Any]
+        ignore_lines=[],
+    )
     stack.append(info)

     # Our main job is to figure out offsets that we use to nudge lines
     # over by.
-    offsets = {} # type: Dict[int, int]
+    offsets: Dict[int, int] = {}

     # Loop through our start/end tokens, and calculate offsets. As
     # we proceed, we will push/pop info dictionaries on/off a stack.


@@ -51,7 +51,7 @@ FILES_WITH_LEGACY_SUBJECT = {
     'zerver/tests/test_narrow.py',
 }

-shebang_rules = [
+shebang_rules: List["Rule"] = [
     {'pattern': '^#!',
      'description': "zerver library code shouldn't have a shebang line.",
      'include_only': {'zerver/'}},

@@ -63,14 +63,14 @@ shebang_rules = [
      " for interpreters other than sh."},
     {'pattern': '^#!/usr/bin/env python$',
      'description': "Use `#!/usr/bin/env python3` instead of `#!/usr/bin/env python`."}
-] # type: List[Rule]
+]

-trailing_whitespace_rule = {
+trailing_whitespace_rule: "Rule" = {
     'pattern': r'\s+$',
     'strip': '\n',
     'description': 'Fix trailing whitespace'
-} # type: Rule
-whitespace_rules = [
+}
+whitespace_rules: List["Rule"] = [
     # This linter should be first since bash_rules depends on it.
     trailing_whitespace_rule,
     {'pattern': 'http://zulip.readthedocs.io',
@@ -80,14 +80,14 @@ whitespace_rules = [
     'strip': '\n',
     'exclude': {'tools/ci/success-http-headers.txt'},
     'description': 'Fix tab-based whitespace'},
-] # type: List[Rule]
-comma_whitespace_rule = [
+]
+comma_whitespace_rule: List["Rule"] = [
     {'pattern': ', {2,}[^#/ ]',
      'exclude': {'zerver/tests', 'frontend_tests/node_tests', 'corporate/tests'},
     'description': "Remove multiple whitespaces after ','",
     'good_lines': ['foo(1, 2, 3)', 'foo = bar  # some inline comment'],
     'bad_lines': ['foo(1,  2, 3)', 'foo(1, 2,  3)']},
-] # type: List[Rule]
+]
 markdown_whitespace_rules = list([rule for rule in whitespace_rules if rule['pattern'] != r'\s+$']) + [
     # Two spaces trailing a line with other content is okay--it's a markdown line break.
     # This rule finds one space trailing a non-space, three or more trailing spaces, and
@@ -560,7 +560,7 @@ css_rules = RuleList(
     ],
 )

-prose_style_rules = [
+prose_style_rules: List["Rule"] = [
     {'pattern': r'[^\/\#\-"]([jJ]avascript)', # exclude usage in hrefs/divs
      'exclude': {"docs/documentation/api.md"},
      'description': "javascript should be spelled JavaScript"},

@@ -576,8 +576,8 @@ prose_style_rules = [
     {'pattern': '[^-_p]botserver(?!rc)|bot server',
      'description': "Use Botserver instead of botserver or bot server."},
     *comma_whitespace_rule,
-] # type: List[Rule]
-html_rules = whitespace_rules + prose_style_rules + [
+]
+html_rules: List["Rule"] = whitespace_rules + prose_style_rules + [
     {'pattern': 'subject|SUBJECT',
      'exclude': {'templates/zerver/email.html'},
      'exclude_pattern': 'email subject',

@@ -703,7 +703,7 @@ html_rules = whitespace_rules + prose_style_rules + [
         },
      'good_lines': ['#my-style {color: blue;}', 'style="display: none"', "style='display: none"],
      'bad_lines': ['<p style="color: blue;">Foo</p>', 'style = "color: blue;"']},
-] # type: List[Rule]
+]

 handlebars_rules = RuleList(
     langs=['hbs'],


@@ -19,7 +19,7 @@ def validate_order(order: List[int], length: int) -> None:
         sys.exit(1)

 def renumber_migration(conflicts: List[str], order: List[int], last_correct_migration: str) -> None:
-    stack = [] # type: List[str]
+    stack: List[str] = []
     for i in order:
         if conflicts[i-1][0:4] not in stack:
             stack.append(conflicts[i-1][0:4])

@@ -53,8 +53,8 @@ def resolve_conflicts(conflicts: List[str], files_list: List[str]) -> None:

 if __name__ == '__main__':
     while True:
-        conflicts = [] # type: List[str]
-        stack = [] # type: List[str]
+        conflicts: List[str] = []
+        stack: List[str] = []
         files_list = [os.path.basename(path) for path in glob.glob("zerver/migrations/????_*.py")]
         file_index = [file[0:4] for file in files_list]


@@ -84,7 +84,7 @@ if options.interface is None:
 elif options.interface == "":
     options.interface = None

-runserver_args = [] # type: List[str]
+runserver_args: List[str] = []
 base_port = 9991
 if options.test:
     base_port = 9981

@@ -199,9 +199,9 @@ def fetch_request(url: str, callback: Any, **kwargs: Any) -> "Generator[Callable

 class BaseHandler(web.RequestHandler):
     # target server ip
-    target_host = '127.0.0.1' # type: str
+    target_host: str = '127.0.0.1'
     # target server port
-    target_port = None # type: int
+    target_port: int

     def _add_request_headers(
         self, exclude_lower_headers_list: Optional[List[str]] = None


@@ -69,7 +69,7 @@ if not python_files and not pyi_files:
     print("There are no files to run mypy on.")
     sys.exit(0)

-mypy_args = [] # type: List[str]
+mypy_args: List[str] = []
 if args.quiet:
     mypy_args += ["--no-error-summary"]
 mypy_args += ["--"] + python_files + pyi_files


@@ -1,6 +1,6 @@
 from typing import Any, Dict

-EMOJI_NAME_MAPS = {
+EMOJI_NAME_MAPS: Dict[str, Dict[str, Any]] = {
     # seems like best emoji for happy
     '1f600': {'canonical_name': 'grinning', 'aliases': ['happy']},
     '1f603': {'canonical_name': 'smiley', 'aliases': []},

@@ -1528,4 +1528,4 @@ EMOJI_NAME_MAPS = {
     '1f6a9': {'canonical_name': 'triangular_flag', 'aliases': []},
     # solidarity from iemoji
     '1f38c': {'canonical_name': 'crossed_flags', 'aliases': ['solidarity']},
-} # type: Dict[str, Dict[str, Any]]
+}


@@ -47,7 +47,7 @@ EMOTICON_CONVERSIONS = {
 }

 def emoji_names_for_picker(emoji_name_maps: Dict[str, Dict[str, Any]]) -> List[str]:
-    emoji_names = [] # type: List[str]
+    emoji_names: List[str] = []
     for emoji_code, name_info in emoji_name_maps.items():
         emoji_names.append(name_info["canonical_name"])
         emoji_names.extend(name_info["aliases"])

@@ -73,8 +73,8 @@ def get_emoji_code(emoji_dict: Dict[str, Any]) -> str:
 # `emoji_data`.
 def generate_emoji_catalog(emoji_data: List[Dict[str, Any]],
                            emoji_name_maps: Dict[str, Dict[str, Any]]) -> Dict[str, List[str]]:
-    sort_order = {} # type: Dict[str, int]
-    emoji_catalog = defaultdict(list) # type: Dict[str, List[str]]
+    sort_order: Dict[str, int] = {}
+    emoji_catalog: Dict[str, List[str]] = defaultdict(list)

     for emoji_dict in emoji_data:
         emoji_code = get_emoji_code(emoji_dict)

@@ -100,7 +100,7 @@ def emoji_is_universal(emoji_dict: Dict[str, Any]) -> bool:
     return True

 def generate_codepoint_to_name_map(emoji_name_maps: Dict[str, Dict[str, Any]]) -> Dict[str, str]:
-    codepoint_to_name = {} # type: Dict[str, str]
+    codepoint_to_name: Dict[str, str] = {}
     for emoji_code, name_info in emoji_name_maps.items():
         codepoint_to_name[emoji_code] = name_info["canonical_name"]
     return codepoint_to_name


@@ -21,7 +21,7 @@ ZULIP_PATH = os.path.dirname(TOOLS_DIR_PATH)
 # `emoji.json` file is same in all four emoji-datasource packages.
 EMOJI_DATA_PATH = os.path.join(ZULIP_PATH, 'node_modules', 'emoji-datasource-google', 'emoji.json')

-sorting_info = {} # type: Dict[str, Any]
+sorting_info: Dict[str, Any] = {}
 column_names = [
     'Codepoint',
     'New sorting info',

@@ -47,7 +47,7 @@ name_entry_regex = re.compile(r"'(?P<emoji_code>[a-z0-9-]+)': "
 explanation_regex = re.compile(r" # (?P<explanation_line>[^\r\n\t]+)")

 def prepare_sorting_info() -> None:
-    emoji_data = [] # type: List[Dict[str, Any]]
+    emoji_data: List[Dict[str, Any]] = []
     with open(EMOJI_DATA_PATH) as fp:
         emoji_data = ujson.load(fp)

@@ -90,7 +90,7 @@ def main() -> None:
     args = parser.parse_args()
     prepare_sorting_info()
     output_data = [column_names, ]
-    explanation_lines = [] # type: List[str]
+    explanation_lines: List[str] = []
     with open(args.input_file_path) as fp:
         for line in fp.readlines():
             match = name_entry_regex.search(line)


@@ -119,14 +119,14 @@ SORTED_CATEGORIES = [
     'Skin Tones',
 ]

-emoji_code_to_zulip_names = {} # type: Dict[str, str]
-emoji_code_to_iamcal_names = {} # type: Dict[str, str]
-emoji_code_to_gemoji_names = {} # type: Dict[str, str]
-emoji_collection = {category: [] for category in SORTED_CATEGORIES} # type: Dict[str, List[Dict[str, Any]]]
+emoji_code_to_zulip_names: Dict[str, str] = {}
+emoji_code_to_iamcal_names: Dict[str, str] = {}
+emoji_code_to_gemoji_names: Dict[str, str] = {}
+emoji_collection: Dict[str, List[Dict[str, Any]]] = {category: [] for category in SORTED_CATEGORIES}

 def generate_emoji_code_to_emoji_names_maps() -> None:
     # Prepare gemoji names map.
-    reverse_unified_reactions_map = {} # type: Dict[str, List[str]]
+    reverse_unified_reactions_map: Dict[str, List[str]] = {}
     for name in UNIFIED_REACTIONS_MAP:
         emoji_code = UNIFIED_REACTIONS_MAP[name]
         if emoji_code in reverse_unified_reactions_map:


@@ -35,10 +35,10 @@ FILE_TEMPLATE = (
     "} # type: Dict[str, Dict[str, Any]]\n"
 )

-emoji_names = set() # type: Set[str]
+emoji_names: Set[str] = set()

 def load_data(data_file: str) -> List[List[str]]:
-    emoji_name_data = [] # type: List[List[str]]
+    emoji_name_data: List[List[str]] = []
     with open(data_file, newline='') as fp:
         data = csv.reader(fp)
         for row in data:
@@ -66,13 +66,13 @@ def prepare_explanation(explanation: str) -> str:
     if explanation == '':
         return ''

-    wrapper_config = {
+    wrapper_config: Dict[str, Any] = {
         'width': 80,
         'break_long_words': False,
         'break_on_hyphens': False,
         'initial_indent': '\n # ',
         'subsequent_indent': '\n # ',
-    } # type: Dict[str, Any]
+    }
     wrapped_lines = textwrap.wrap(explanation.strip(), **wrapper_config)
     return ''.join(wrapped_lines)