mirror of https://github.com/zulip/zulip.git
python: Migrate open statements to use with.
This is low priority, but it's nice to be consistently using the best-practice pattern.

Fixes: #12419.
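As a minimal sketch of the pattern being migrated (the file name and contents below are illustrative, not taken from the diff): a bare open() leaves the file handle open until the object is garbage-collected, while a with block closes it deterministically, even if the read raises.

# Illustrative only; 'example.txt' is not a file from this commit.
with open('example.txt', 'w') as f:
    f.write('first|second|third|fourth\n')

# Before: the file object is closed only when it is garbage-collected,
# which is prompt under CPython but not guaranteed on other interpreters.
data = open('example.txt').read().strip()

# After: the with statement guarantees close(), even if read() raises.
with open('example.txt') as f:
    data = f.read().strip()

pieces = data.split('|')
assert len(pieces) == 4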
This commit is contained in:
parent e97179fc87
commit e331a758c3
@@ -18,7 +18,8 @@ def nagios_from_file(results_file: str, max_time_diff: int=60 * 2) -> 'Tuple[int
     This file is created by various nagios checking cron jobs such as
     check-rabbitmq-queues and check-rabbitmq-consumers"""
 
-    data = open(results_file).read().strip()
+    with open(results_file, 'r') as f:
+        data = f.read().strip()
     pieces = data.split('|')
 
     if not len(pieces) == 4:
@@ -34,7 +34,8 @@ down_count = 0
 for results_file_name in os.listdir(RESULTS_DIR):
     this_state = "OK"
     results_file = os.path.join(RESULTS_DIR, results_file_name)
-    data = open(results_file).read().strip()
+    with open(results_file, 'r') as f:
+        data = f.read().strip()
     last_check = os.stat(results_file).st_mtime
     time_since_last_check = time.time() - last_check
     # time_since_last_check threshold needs to be strictly greater
@@ -32,7 +32,8 @@ def report(state, data, last_check):
                                                 data))
     exit(states[state])
 
-data = open(RESULTS_FILE).read().strip()
+with open(RESULTS_FILE, 'r') as f:
+    data = f.read().strip()
 if data.split("\n")[-1].strip() == "0":
     state = "OK"
 else:
@@ -260,16 +260,14 @@ def do_patch_activate_script(venv_path):
     # venv_path should be what we want to have in VIRTUAL_ENV after patching
     script_path = os.path.join(venv_path, "bin", "activate")
 
-    file_obj = open(script_path)
-    lines = file_obj.readlines()
+    with open(script_path, 'r') as f:
+        lines = f.readlines()
     for i, line in enumerate(lines):
         if line.startswith('VIRTUAL_ENV='):
             lines[i] = 'VIRTUAL_ENV="%s"\n' % (venv_path,)
-    file_obj.close()
 
-    file_obj = open(script_path, 'w')
-    file_obj.write("".join(lines))
-    file_obj.close()
+    with open(script_path, 'w') as f:
+        f.write("".join(lines))
 
 def setup_virtualenv(target_venv_path, requirements_file, virtualenv_args=None, patch_activate_script=False):
     # type: (Optional[str], str, Optional[List[str]], bool) -> str
@@ -285,7 +283,8 @@ def setup_virtualenv(target_venv_path, requirements_file, virtualenv_args=None,
     success_stamp = os.path.join(cached_venv_path, "success-stamp")
     if not os.path.exists(success_stamp):
         do_setup_virtualenv(cached_venv_path, requirements_file, virtualenv_args or [])
-        open(success_stamp, 'w').close()
+        with open(success_stamp, 'w') as f:
+            f.close()
 
     print("Using cached Python venv from %s" % (cached_venv_path,))
     if target_venv_path is not None:
@@ -14,7 +14,8 @@ def nagios_from_file(results_file):
     This file is created by various nagios checking cron jobs such as
     check-rabbitmq-queues and check-rabbitmq-consumers"""
 
-    data = open(results_file).read().strip()
+    with open(results_file) as f:
+        data = f.read().strip()
     pieces = data.split('|')
 
     if not len(pieces) == 4:
@@ -108,11 +108,10 @@ def generate_secrets(development=False):
         print("generate_secrets: No new secrets to generate.")
         return
 
-    out = open(OUTPUT_SETTINGS_FILENAME, 'a')
-    # Write a newline at the start, in case there was no newline at
-    # the end of the file due to human editing.
-    out.write("\n" + "".join(lines))
-    out.close()
+    with open(OUTPUT_SETTINGS_FILENAME, 'a') as f:
+        # Write a newline at the start, in case there was no newline at
+        # the end of the file due to human editing.
+        f.write("\n" + "".join(lines))
 
     print("Generated new secrets in %s." % (OUTPUT_SETTINGS_FILENAME,))
 
@@ -15,7 +15,8 @@ def debug(obj):
 
 def parse_file(fn):
     # type: (str) -> Dict[str, Any]
-    text = open(fn).read()
+    with open(fn, 'r') as f:
+        text = f.read()
     tags = re.findall(r'{+\s*(.*?)\s*}+', text)
     root = {}  # type: Dict[str, Any]
     context = root
@@ -193,7 +193,8 @@ def build_id_dict(templates):
     template_id_dict = defaultdict(list)  # type: (Dict[str, List[str]])
 
     for fn in templates:
-        text = open(fn).read()
+        with open(fn, 'r') as f:
+            text = f.read()
         list_tags = tokenize(text)
 
         for tag in list_tags:
@@ -7,7 +7,8 @@ def show_all_branches(fns):
     # type: (List[str]) -> None
     for fn in fns:
         print(fn)
-        text = open(fn).read()
+        with open(fn, 'r') as f:
+            text = f.read()
         branches = html_branches(text, fn=fn)
         for branch in branches:
             print(branch.text())
@@ -25,7 +26,8 @@ class Grepper:
         all_branches = []  # type: List[HtmlTreeBranch]
 
         for fn in fns:
-            text = open(fn).read()
+            with open(fn, 'r') as f:
+                text = f.read()
             branches = html_branches(text, fn=fn)
             all_branches += branches
 
@@ -194,10 +194,9 @@ def pretty_print_html(html, num_spaces=4):
 
 def validate_indent_html(fn, fix):
     # type: (str, bool) -> int
-    file = open(fn)
-    html = file.read()
+    with open(fn, 'r') as f:
+        html = f.read()
     phtml = pretty_print_html(html)
-    file.close()
     if not html.split('\n') == phtml.split('\n'):
         if fix:
             print(GREEN + "Automatically fixing problems..." + ENDC)
@@ -214,7 +214,8 @@ def validate(fn=None, text=None, check_indent=True):
         fn = '<in memory file>'
 
     if text is None:
-        text = open(fn).read()
+        with open(fn, 'r') as f:
+            text = f.read()
 
     tokens = tokenize(text)
 
@@ -56,7 +56,8 @@ def get_provisioning_status():
         # their own dependencies and not running provision.
         return True, None
 
-    version = open(version_file).read().strip()
+    with open(version_file, 'r') as f:
+        version = f.read().strip()
 
     # Normal path for people that provision--we're all good!
     if version == PROVISION_VERSION:
@@ -7,13 +7,11 @@ def clean_html(filenames):
     # type: (List[str]) -> None
     for fn in filenames:
         print('Prettifying: %s' % (fn,))
-        file = open(fn)
-        html = file.read()
+        with open(fn, 'r') as f:
+            html = f.read()
         phtml = pretty_print_html(html)
-        file.close()
-        file = open(fn, 'w')
-        file.write(phtml)
-        file.close()
+        with open(fn, 'w') as f:
+            f.write(phtml)
 
 if __name__ == '__main__':
     # If command arguments are provided, we only check those filenames.
@@ -133,9 +133,8 @@ else:
     # Required for compatibility python versions.
     if not os.path.exists(os.path.dirname(pid_file_path)):
        os.makedirs(os.path.dirname(pid_file_path))
-    pid_file = open(pid_file_path, 'w+')
-    pid_file.write(str(os.getpgrp()) + "\n")
-    pid_file.close()
+    with open(pid_file_path, 'w+') as f:
+        f.write(str(os.getpgrp()) + "\n")
 
 # Pass --nostatic because we configure static serving ourselves in
 # zulip/urls.py.
@@ -92,7 +92,8 @@ def main() -> None:
     if not os.path.exists(success_stamp):
         print("Dumping emojis ...")
         dump_emojis(source_emoji_dump)
-        open(success_stamp, 'w').close()
+        with open(success_stamp, 'w') as f:
+            f.close()
 
     print("Using cached emojis from {}".format(source_emoji_dump))
     if os.path.lexists(TARGET_EMOJI_DUMP):
@@ -135,11 +136,10 @@ def generate_sprite_css_files(cache_path: str,
     }
 
     SPRITE_CSS_PATH = os.path.join(cache_path, '%s-sprite.css' % (emojiset,))
-    sprite_css_file = open(SPRITE_CSS_PATH, 'w')
-    sprite_css_file.write(SPRITE_CSS_FILE_TEMPLATE % {'emojiset': emojiset,
+    with open(SPRITE_CSS_PATH, 'w') as f:
+        f.write(SPRITE_CSS_FILE_TEMPLATE % {'emojiset': emojiset,
                                             'emoji_positions': emoji_positions,
                                             })
-    sprite_css_file.close()
 
 def setup_emoji_farms(cache_path: str, emoji_data: List[Dict[str, Any]]) -> None:
     def ensure_emoji_image(emoji_dict: Dict[str, Any],
@@ -115,8 +115,8 @@ def main() -> None:
             explanation_line = match.group('explanation_line').strip()
             explanation_lines.append(explanation_line)
 
-    fp = open(args.output_file_path, 'w')
-    writer = csv.writer(fp, dialect='excel')
+    with open(args.output_file_path, 'w') as f:
+        writer = csv.writer(f, dialect='excel')
         writer.writerows(output_data)
     # The CSV file exported by google sheets doesn't have a newline
     # character in the end. So we also strip the last newline character
@@ -83,8 +83,7 @@ for msg in result['messages']:
     messages.append(msg)
 
 filename = "zulip-%s.json" % (options.stream,)
-f = open(filename, mode="wb")
+with open(filename, 'wb') as f:
     f.write(json.dumps(messages, indent=0, sort_keys=False).encode('utf-8'))
-f.close()
 print("%d messages exported to %s" % (len(messages), filename,))
 sys.exit(0)
@@ -800,7 +800,7 @@ def upload_file(client):
 
     # {code_example|start}
     # Upload a file
-    fp = open(path_to_file, 'rb')
+    with open(path_to_file, 'rb') as fp:
         result = client.call_endpoint(
             'user_uploads',
             method='POST',
@@ -851,7 +851,7 @@ def upload_custom_emoji(client):
 
     # {code_example|start}
    # Upload a custom emoji; assume `emoji_path` is the path to your image.
-    fp = open(emoji_path, 'rb')
+    with open(emoji_path, 'rb') as fp:
         emoji_name = 'my_custom_emoji'
         result = client.call_endpoint(
             'realm/emoji/{}'.format(emoji_name),
@@ -859,7 +859,7 @@ def upload_custom_emoji(client):
             files=[fp]
         )
     # {code_example|end}
-    fp.close()
+
     validate_against_openapi_schema(result,
                                     '/realm/emoji/{emoji_name}',
                                     'post', '200')
@@ -53,7 +53,8 @@ def tracemalloc_dump() -> None:
     gc.collect()
     tracemalloc.take_snapshot().dump(path)
 
-    procstat = open('/proc/{}/stat'.format(os.getpid()), 'rb').read().split()
+    with open('/proc/{}/stat'.format(os.getpid()), 'rb') as f:
+        procstat = f.read().split()
     rss_pages = int(procstat[23])
     logger.info("tracemalloc dump: tracing {} MiB ({} MiB peak), using {} MiB; rss {} MiB; dumped {}"
                 .format(tracemalloc.get_traced_memory()[0] // 1048576,
@@ -1375,7 +1375,8 @@ def do_write_stats_file_for_realm_export(output_dir: Path) -> None:
     with open(stats_file, 'w') as f:
         for fn in fns:
             f.write(os.path.basename(fn) + '\n')
-            payload = open(fn).read()
+            with open(fn, 'r') as filename:
+                payload = filename.read()
             data = ujson.loads(payload)
             for k in sorted(data):
                 f.write('%5d %s\n' % (len(data[k]), k))
@@ -1386,7 +1387,8 @@ def do_write_stats_file_for_realm_export(output_dir: Path) -> None:
 
         for fn in [avatar_file, uploads_file]:
             f.write(fn+'\n')
-            payload = open(fn).read()
+            with open(fn, 'r') as filename:
+                payload = filename.read()
             data = ujson.loads(payload)
             f.write('%5d records\n' % (len(data),))
             f.write('\n')
@@ -735,7 +735,8 @@ class LocalUploadBackend(ZulipUploadBackend):
             return
 
         image_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", file_path + ".original")
-        image_data = open(image_path, "rb").read()
+        with open(image_path, "rb") as f:
+            image_data = f.read()
         resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE)
         write_local_file('avatars', file_path + '-medium.png', resized_medium)
 
@@ -748,7 +749,8 @@ class LocalUploadBackend(ZulipUploadBackend):
             return
 
         image_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", file_path + ".original")
-        image_data = open(image_path, "rb").read()
+        with open(image_path, "rb") as f:
+            image_data = f.read()
         resized_avatar = resize_avatar(image_data)
         write_local_file('avatars', file_path + '.png', resized_avatar)
 
@@ -31,7 +31,8 @@ def add_deployment_metadata(report: Dict[str, Any]) -> None:
 
     version_path = os.path.join(os.path.dirname(__file__), '../version')
     if os.path.exists(version_path):
-        report['zulip_version_file'] = open(version_path).read().strip()  # nocoverage
+        with open(version_path, 'r') as f:  # nocoverage
+            report['zulip_version_file'] = f.read().strip()
 
 def add_request_metadata(report: Dict[str, Any], request: HttpRequest) -> None:
     report['has_request'] = True
@@ -77,7 +77,8 @@ parameters, or specify no parameters for interactive user creation.""")
 
         try:
             if options['password_file']:
-                pw = open(options['password_file'], 'r').read()
+                with open(options['password_file'], 'r') as f:
+                    pw = f.read()
             elif options['password']:
                 pw = options['password']
             else:
@@ -3062,7 +3062,8 @@ class TestZulipLDAPUserPopulator(ZulipLDAPTestCase):
     @use_s3_backend
     def test_update_user_avatar_for_s3(self) -> None:
         bucket = create_s3_buckets(settings.S3_AVATAR_BUCKET)[0]
-        test_image_data = open(get_test_image_file('img.png').name, 'rb').read()
+        with open(get_test_image_file('img.png').name, 'rb') as f:
+            test_image_data = f.read()
 
         self.mock_ldap.directory = {
             'uid=hamlet,ou=users,dc=zulip,dc=com': {
@@ -262,8 +262,8 @@ class BugdownTest(ZulipTestCase):
 
     def load_bugdown_tests(self) -> Tuple[Dict[str, Any], List[List[str]]]:
         test_fixtures = {}
-        data_file = open(os.path.join(os.path.dirname(__file__), 'fixtures/markdown_test_cases.json'), 'r')
-        data = ujson.loads('\n'.join(data_file.readlines()))
+        with open(os.path.join(os.path.dirname(__file__), 'fixtures/markdown_test_cases.json'), 'r') as f:
+            data = ujson.loads('\n'.join(f.readlines()))
         for test in data['regular_tests']:
             test_fixtures[test['name']] = test
 
@@ -301,7 +301,8 @@ class ImportExportTest(ZulipTestCase):
             upload_emoji_image(img_file, '1.png', user_profile)
         with get_test_image_file('img.png') as img_file:
             upload_avatar_image(img_file, user_profile, user_profile)
-        test_image = open(get_test_image_file('img.png').name, 'rb').read()
+        with open(get_test_image_file('img.png').name, 'rb') as f:
+            test_image = f.read()
         message.sender.avatar_source = 'U'
         message.sender.save()
 
@@ -323,7 +324,7 @@ class ImportExportTest(ZulipTestCase):
 
         # Test uploads
         fn = os.path.join(full_data['uploads_dir'], path_id)
-        with open(fn) as f:
+        with open(fn, 'r') as f:
             self.assertEqual(f.read(), 'zulip!')
         records = full_data['uploads_dir_records']
         self.assertEqual(records[0]['path'], path_id)
@@ -340,7 +341,8 @@ class ImportExportTest(ZulipTestCase):
 
         # Test avatars
         fn = os.path.join(full_data['avatar_dir'], original_avatar_path_id)
-        fn_data = open(fn, 'rb').read()
+        with open(fn, 'rb') as fb:
+            fn_data = fb.read()
         self.assertEqual(fn_data, test_image)
         records = full_data['avatar_dir_records']
         record_path = [record['path'] for record in records]
@@ -370,7 +372,7 @@ class ImportExportTest(ZulipTestCase):
         # Test uploads
         fields = attachment_path_id.split('/')
         fn = os.path.join(full_data['uploads_dir'], os.path.join(fields[0], fields[1], fields[2]))
-        with open(fn) as f:
+        with open(fn, 'r') as f:
             self.assertEqual(f.read(), 'zulip!')
         records = full_data['uploads_dir_records']
         self.assertEqual(records[0]['path'], os.path.join(fields[0], fields[1], fields[2]))
@@ -390,7 +392,8 @@ class ImportExportTest(ZulipTestCase):
 
         # Test avatars
         fn = os.path.join(full_data['avatar_dir'], original_avatar_path_id)
-        fn_data = open(fn, 'rb').read()
+        with open(fn, 'rb') as file:
+            fn_data = file.read()
         self.assertEqual(fn_data, test_image)
         records = full_data['avatar_dir_records']
         record_path = [record['path'] for record in records]
@@ -994,7 +997,8 @@ class ImportExportTest(ZulipTestCase):
         do_import_realm(os.path.join(settings.TEST_WORKER_DIR, 'test-export'),
                         'test-zulip')
         imported_realm = Realm.objects.get(string_id='test-zulip')
-        test_image_data = open(get_test_image_file('img.png').name, 'rb').read()
+        with open(get_test_image_file('img.png').name, 'rb') as f:
+            test_image_data = f.read()
 
         # Test attachments
         uploaded_file = Attachment.objects.get(realm=imported_realm)
@@ -490,7 +490,8 @@ class WorkerTest(ZulipTestCase):
                          "Problem handling data on queue unreliable_worker")
 
         self.assertEqual(processed, ['good', 'fine', 'back to normal'])
-        line = open(fn).readline().strip()
+        with open(fn, 'r') as f:
+            line = f.readline().strip()
         event = ujson.loads(line.split('\t')[1])
         self.assertEqual(event["type"], 'unexpected behaviour')
 
@@ -24,8 +24,9 @@ class SlackMessageConversion(ZulipTestCase):
 
     def load_slack_message_conversion_tests(self) -> Dict[Any, Any]:
         test_fixtures = {}
-        data_file = open(os.path.join(os.path.dirname(__file__), 'fixtures/slack_message_conversion.json'), 'r')
-        data = ujson.loads('\n'.join(data_file.readlines()))
+        with open(os.path.join(os.path.dirname(__file__),
+                               'fixtures/slack_message_conversion.json'), 'r') as f:
+            data = ujson.loads('\n'.join(f.readlines()))
         for test in data['regular_tests']:
             test_fixtures[test['name']] = test
 
@@ -1558,7 +1558,8 @@ class S3Test(ZulipTestCase):
 
         with get_test_image_file('img.png') as image_file:
             zerver.lib.upload.upload_backend.upload_avatar_image(image_file, user_profile, user_profile)
-        test_image_data = open(get_test_image_file('img.png').name, 'rb').read()
+        with open(get_test_image_file('img.png').name, 'rb') as f:
+            test_image_data = f.read()
         test_medium_image_data = resize_avatar(test_image_data, MEDIUM_AVATAR_SIZE)
 
         original_image_key = bucket.get_key(original_image_path_id)
@@ -60,7 +60,8 @@ def get_fixtures(request: HttpResponse,
 
     for fixture in os.listdir(fixtures_dir):
         fixture_path = os.path.join(fixtures_dir, fixture)
-        body = open(fixture_path).read()
+        with open(fixture_path, 'r') as f:
+            body = f.read()
         try:
             body = ujson.loads(body)
         except ValueError:
@@ -115,7 +116,8 @@ def send_all_webhook_fixture_messages(request: HttpRequest,
     responses = []
     for fixture in os.listdir(fixtures_dir):
         fixture_path = os.path.join(fixtures_dir, fixture)
-        content = open(fixture_path).read()
+        with open(fixture_path, 'r') as f:
+            content = f.read()
         x = fixture.split(".")
         fixture_name, fixture_format = "".join(_ for _ in x[:-1]), x[-1]
         headers = get_fixture_http_headers(integration_name, fixture_name)