mirror of https://github.com/zulip/zulip.git
python: Simplify with str.removeprefix, str.removesuffix.

These are available in Python ≥ 3.9.
https://docs.python.org/3/library/stdtypes.html#str.removeprefix

Signed-off-by: Anders Kaseorg <anders@zulip.com>
parent 1ec4539550
commit 91ade25ba3
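As background for the hunks below: str.removeprefix and str.removesuffix (added in Python 3.9 by PEP 616) strip the affix only when it is actually present and return the string unchanged otherwise, unlike unconditional slicing. A minimal sketch of the semantics, with made-up values rather than anything from this commit:

    # removesuffix is a no-op when the suffix is absent...
    assert "zulip-tornado".removesuffix("_regex") == "zulip-tornado"
    # ...whereas blind slicing always cuts, even when it should not:
    assert "zulip-tornado"[: -len("_regex")] == "zulip-t"

    # This is why many startswith/endswith guards can be dropped
    # together with the slice they protected:
    name = "ZulipGitHubWebhook"
    assert name.removeprefix("Zulip").removesuffix("Webhook") == "GitHub"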
@@ -610,7 +610,7 @@ def client_label_map(name: str) -> str:
     if name in ["ZulipPython", "API: Python"]:
         return "Python API"
     if name.startswith("Zulip") and name.endswith("Webhook"):
-        return name[len("Zulip") : -len("Webhook")] + " webhook"
+        return name.removeprefix("Zulip").removesuffix("Webhook") + " webhook"
     return name

@@ -135,10 +135,7 @@ def get_next_page_param_from_request_path(request: HttpRequest) -> str | None:
     # Therefore we can use this nice property to figure out easily what
     # kind of page the user is trying to access and find the right value
     # for the `next` query parameter.
-    path = request.path
-    if path.endswith("/"):
-        path = path[:-1]
-
+    path = request.path.removesuffix("/")
     page_type = path.split("/")[-1]

     from corporate.views.remote_billing_page import (
@@ -120,15 +120,16 @@ def stripe_fixture_path(
 ) -> str:
     # Make the eventual filename a bit shorter, and also we conventionally
     # use test_* for the python test files
-    if decorated_function_name[:5] == "test_":
-        decorated_function_name = decorated_function_name[5:]
-    return f"{STRIPE_FIXTURES_DIR}/{decorated_function_name}--{mocked_function_name[7:]}.{call_count}.json"
+    decorated_function_name = decorated_function_name.removeprefix("test_")
+    mocked_function_name = mocked_function_name.removeprefix("stripe.")
+    return (
+        f"{STRIPE_FIXTURES_DIR}/{decorated_function_name}--{mocked_function_name}.{call_count}.json"
+    )


 def fixture_files_for_function(decorated_function: CallableT) -> list[str]:  # nocoverage
     decorated_function_name = decorated_function.__name__
-    if decorated_function_name[:5] == "test_":
-        decorated_function_name = decorated_function_name[5:]
+    decorated_function_name = decorated_function_name.removeprefix("test_")
     return sorted(
         f"{STRIPE_FIXTURES_DIR}/{f}"
         for f in os.listdir(STRIPE_FIXTURES_DIR)
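The magic numbers in the old version encoded affix lengths: decorated_function_name[5:] relied on len("test_") == 5 and mocked_function_name[7:] on len("stripe.") == 7. The removeprefix calls make those assumptions explicit; for instance (hypothetical mock name):

    assert "stripe.Customer.create".removeprefix("stripe.") == "Customer.create"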
@@ -54,7 +54,7 @@ def write_updated_configs() -> None:
         ).strip()
         for key, shards in config_file["tornado_sharding"].items():
             if key.endswith("_regex"):
-                ports = [int(port) for port in key[: -len("_regex")].split("_")]
+                ports = [int(port) for port in key.removesuffix("_regex").split("_")]
                 shard_regexes.append((shards, ports[0] if len(ports) == 1 else ports))
                 nginx_sharding_conf_f.write(
                     f"    {nginx_quote('~*' + shards)} http://tornado{'_'.join(map(str, ports))};\n"
@@ -58,7 +58,8 @@ def list_supervisor_processes(
         for filter_name in filter_names:
             # zulip-tornado:* matches zulip-tornado:9800 and zulip-tornado
             if filter_name.endswith(":*") and (
-                name.startswith(filter_name[:-1]) or name == filter_name[:-2]
+                name.startswith(filter_name.removesuffix("*"))
+                or name == filter_name.removesuffix(":*")
             ):
                 match = True
                 break
@@ -122,7 +122,7 @@ try:
     keep_refs = set()
     for worktree_line in worktree_data:
         if worktree_line.startswith("branch "):
-            keep_refs.add(worktree_line[len("branch ") :])
+            keep_refs.add(worktree_line.removeprefix("branch "))

     delete_input = "".join(
         f"delete {refname}\n" for refname in matching_refs if refname not in keep_refs
@@ -639,7 +639,7 @@ def get_tornado_ports(config_file: configparser.RawConfigParser) -> list[int]:
             {
                 int(port)
                 for key in config_file.options("tornado_sharding")
-                for port in (key[: -len("_regex")] if key.endswith("_regex") else key).split("_")
+                for port in key.removesuffix("_regex").split("_")
             }
         )
     if not ports:
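The conditional expression disappears in this hunk precisely because removesuffix is a no-op when the suffix is missing. A quick equivalence check with hypothetical sharding keys:

    for key in ("9800_9801_regex", "9800"):
        old = (key[: -len("_regex")] if key.endswith("_regex") else key).split("_")
        new = key.removesuffix("_regex").split("_")
        assert old == new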
@@ -454,9 +454,9 @@ def print_line(
     ts = match["time"]

     if match["duration"].endswith("ms"):
-        duration_ms = int(match["duration"][:-2])
+        duration_ms = int(match["duration"].removesuffix("ms"))
     else:
-        duration_ms = int(float(match["duration"][:-1]) * 1000)
+        duration_ms = int(float(match["duration"].removesuffix("s")) * 1000)

     code = int(match["code"])
     indicator = " "
@@ -165,8 +165,8 @@ class BaseDocumentationSpider(scrapy.Spider):
         if split_url.hostname == "github.com" and f"{split_url.path}/".startswith(
             f"{ZULIP_SERVER_GITHUB_FILE_PATH_PREFIX}/"
         ):
-            file_path = (
-                DEPLOY_ROOT + split_url.path[len(ZULIP_SERVER_GITHUB_FILE_PATH_PREFIX) :]
+            file_path = DEPLOY_ROOT + split_url.path.removeprefix(
+                ZULIP_SERVER_GITHUB_FILE_PATH_PREFIX
             )
             if not os.path.isfile(file_path):
                 self.logger.error(
@@ -176,8 +176,8 @@ class BaseDocumentationSpider(scrapy.Spider):
         elif split_url.hostname == "github.com" and f"{split_url.path}/".startswith(
             f"{ZULIP_SERVER_GITHUB_DIRECTORY_PATH_PREFIX}/"
         ):
-            dir_path = (
-                DEPLOY_ROOT + split_url.path[len(ZULIP_SERVER_GITHUB_DIRECTORY_PATH_PREFIX) :]
+            dir_path = DEPLOY_ROOT + split_url.path.removeprefix(
+                ZULIP_SERVER_GITHUB_DIRECTORY_PATH_PREFIX
            )
             if not os.path.isdir(dir_path):
                 self.logger.error(
@@ -37,7 +37,7 @@ UUID_VAR_PATH = get_dev_uuid_var_path()
 with get_tzdata_zi() as f:
     line = f.readline()
     assert line.startswith("# version ")
-    timezones_version = line[len("# version ") :]
+    timezones_version = line.removeprefix("# version ")


 def create_var_directories() -> None:
@@ -186,9 +186,7 @@ def tokenize(text: str, template_format: str | None = None) -> list[Token]:
                 kind = "handlebars_else"
             elif looking_at_handlebars_start():
                 s = get_handlebars_tag(text, state.i)
-                tag = s[3:-2].split()[0].strip("#")
-                if tag.startswith("*"):
-                    tag = tag[1:]
+                tag = s[3:-2].split()[0].strip("#").removeprefix("*")
                 kind = "handlebars_start"
             elif looking_at_handlebars_end():
                 s = get_handlebars_tag(text, state.i)
@@ -120,8 +120,7 @@ def get_requests_headers(integration_name: str, fixture_name: str) -> dict[str,
     headers = get_fixture_http_headers(integration_name, fixture_name)

     def fix_name(header: str) -> str:
-        header = header if not header.startswith("HTTP_") else header[len("HTTP_") :]
-        return header.replace("_", "-")
+        return header.removeprefix("HTTP_").replace("_", "-")

     return {fix_name(k): v for k, v in headers.items()}

@@ -50,7 +50,7 @@ print("Fetching existing hashes..")
 for obj_summary in bucket.objects.filter(Prefix="server/zulip-server-"):
     head = client.head_object(Bucket=bucket.name, Key=obj_summary.key)
     assert obj_summary.key.startswith("server/")
-    filename = obj_summary.key[len("server/") :]
+    filename = obj_summary.key.removeprefix("server/")
     if filename in file_hashes:
         print(f"  {filename} was already uploaded, skipping existing hash")
         continue
@@ -78,7 +78,7 @@ def get_or_create_key_prefix() -> str:
     tries = 1
     while tries < 10:
         with open(filename) as f:
-            prefix = f.readline()[:-1]
+            prefix = f.readline().removesuffix("\n")
         if len(prefix) == 33:
             break
         tries += 1
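This one is a small behavior improvement, not just a cleanup: f.readline()[:-1] drops the last character unconditionally, even when the final line has no trailing newline, while removesuffix("\n") strips only an actual newline. Illustrated with a made-up value:

    line = "abc"  # e.g. the last line of a file missing its trailing "\n"
    assert line[:-1] == "ab"
    assert line.removesuffix("\n") == "abc"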
@@ -214,7 +214,7 @@ def cache_get_many(keys: list[str], cache_name: str | None = None) -> dict[str,
     remote_cache_stats_start()
     ret = get_cache_backend(cache_name).get_many(keys)
     remote_cache_stats_finish()
-    return {key[len(KEY_PREFIX) :]: value for key, value in ret.items()}
+    return {key.removeprefix(KEY_PREFIX): value for key, value in ret.items()}


 def safe_cache_get_many(keys: list[str], cache_name: str | None = None) -> dict[str, Any]:
@@ -44,7 +44,7 @@ def redact_email_address(error_message: str) -> str:
         domain = Address(addr_spec=settings.EMAIL_GATEWAY_PATTERN).domain
     else:
         # EMAIL_GATEWAY_EXTRA_PATTERN_HACK is of the form '@example.com'
-        domain = settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK[1:]
+        domain = settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK.removeprefix("@")

     def redact(address_match: Match[str]) -> str:
         email_address = address_match[0]
@@ -54,8 +54,7 @@ def get_latest_github_release_download_link_for_platform(platform: str) -> str:

     latest_version = get_latest_github_release_version_for_repo("zulip-desktop")
     if latest_version:
-        if latest_version[0] in ["v", "V"]:
-            latest_version = latest_version[1:]
+        latest_version = latest_version.removeprefix("v")
         setup_file = PLATFORM_TO_SETUP_FILE[platform].format(version=latest_version)
         link = f"https://desktop-download.zulip.com/v{latest_version}/{setup_file}"
         if verify_release_download_link(link):
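One nuance here: the old code stripped either case of the prefix, while removeprefix("v") matches case-sensitively, so an uppercase "V" tag would now pass through unchanged (illustrative version strings):

    assert "v5.9.0".removeprefix("v") == "5.9.0"
    assert "V5.9.0".removeprefix("v") == "V5.9.0"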
@@ -269,7 +269,7 @@ def rewrite_local_links_to_relative(db_data: DbData | None, link: str) -> str:
     if db_data:
         realm_url_prefix = db_data.realm_url + "/"
         if link.startswith((realm_url_prefix + "#", realm_url_prefix + "user_uploads/")):
-            return link[len(realm_url_prefix) :]
+            return link.removeprefix(realm_url_prefix)

     return link

@@ -622,7 +622,7 @@ class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
             a.set("data-id", data_id)
         img = SubElement(a, "img")
         if image_url.startswith("/user_uploads/") and self.zmd.zulip_db_data:
-            path_id = image_url[len("/user_uploads/") :]
+            path_id = image_url.removeprefix("/user_uploads/")

             # We should have pulled the preview data for this image
             # (even if that's "no preview yet") from the database
@@ -737,7 +737,7 @@ class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
         # a row for the ImageAttachment, then its header didn't parse
         # as a valid image type which libvips handles.
         if url.startswith("/user_uploads/") and self.zmd.zulip_db_data:
-            path_id = url[len("/user_uploads/") :]
+            path_id = url.removeprefix("/user_uploads/")
             return path_id in self.zmd.zulip_db_data.user_upload_previews

         return any(parsed_url.path.lower().endswith(ext) for ext in IMAGE_EXTENSIONS)
@@ -825,7 +825,7 @@ class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
         elif split_url.path.startswith(("/embed/", "/shorts/", "/v/")):
             id = split_url.path.split("/", 3)[2]
         elif split_url.hostname == "youtu.be" and split_url.path.startswith("/"):
-            id = split_url.path[len("/") :]
+            id = split_url.path.removeprefix("/")

         if id is not None and re.fullmatch(r"[0-9A-Za-z_-]+", id):
             return id
@@ -1274,7 +1274,7 @@ class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
             if not parsed_url.path.startswith("/user_uploads/"):
                 continue

-            path_id = parsed_url.path[len("/user_uploads/") :]
+            path_id = parsed_url.path.removeprefix("/user_uploads/")
             self.zmd.zulip_rendering_result.potential_attachment_path_ids.append(path_id)

         if len(found_urls) == 0:
@@ -299,9 +299,7 @@ def makeExtension(*args: Any, **kwargs: str) -> MarkdownArgumentsTableGenerator:
 def generate_data_type(schema: Mapping[str, Any]) -> str:
     data_type = ""
     if "oneOf" in schema:
-        for item in schema["oneOf"]:
-            data_type = data_type + generate_data_type(item) + " | "
-        data_type = data_type[:-3]
+        data_type = " | ".join(generate_data_type(item) for item in schema["oneOf"])
     elif "items" in schema:
         data_type = "(" + generate_data_type(schema["items"]) + ")[]"
     else:
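Beyond removeprefix/removesuffix, this hunk swaps the build-then-trim loop for str.join, which places the separator only between items and so needs no data_type[:-3] cleanup afterwards; for example:

    variants = ["string", "integer", "boolean"]
    assert " | ".join(variants) == "string | integer | boolean"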
@@ -29,7 +29,7 @@ class StaticImageProcessor(markdown.treeprocessors.Treeprocessor):
         for img in root.iter("img"):
             url = img.get("src")
             if url is not None and url.startswith("/static/"):
-                img.set("src", staticfiles_storage.url(url[len("/static/") :]))
+                img.set("src", staticfiles_storage.url(url.removeprefix("/static/")))


 def makeExtension(*args: Any, **kwargs: str) -> MarkdownStaticImagesGenerator:
@@ -9,11 +9,11 @@ def is_reserved_subdomain(subdomain: str) -> bool:
         return True
     if subdomain in ZULIP_RESERVED_SUBDOMAINS:
         return True
-    if subdomain[-1] == "s" and subdomain[:-1] in ZULIP_RESERVED_SUBDOMAINS:
+    if subdomain.endswith("s") and subdomain.removesuffix("s") in ZULIP_RESERVED_SUBDOMAINS:
         return True
     if subdomain in GENERIC_RESERVED_SUBDOMAINS:
         return True
-    if subdomain[-1] == "s" and subdomain[:-1] in GENERIC_RESERVED_SUBDOMAINS:
+    if subdomain.endswith("s") and subdomain.removesuffix("s") in GENERIC_RESERVED_SUBDOMAINS:
         return True
     if settings.CORPORATE_ENABLED and ("zulip" in subdomain or "kandra" in subdomain):
         return True
@@ -1647,7 +1647,7 @@ Output:
         # as the actual value of the attribute in LDAP.
         for attr, value in attrs.items():
             if isinstance(value, str) and value.startswith("file:"):
-                with open(value[5:], "rb") as f:
+                with open(value.removeprefix("file:"), "rb") as f:
                     attrs[attr] = [f.read()]

         ldap_patcher = mock.patch("django_auth_ldap.config.ldap.initialize")
@@ -489,14 +489,10 @@ def write_instrumentation_reports(full_suite: bool, include_webhooks: bool) -> N
             find_pattern(pattern, prefixes)

     def cleanup_url(url: str) -> str:
-        if url.startswith("/"):
-            url = url[1:]
-        if url.startswith("http://testserver/"):
-            url = url[len("http://testserver/") :]
-        if url.startswith("http://zulip.testserver/"):
-            url = url[len("http://zulip.testserver/") :]
-        if url.startswith("http://testserver:9080/"):
-            url = url[len("http://testserver:9080/") :]
+        url = url.removeprefix("/")
+        url = url.removeprefix("http://testserver/")
+        url = url.removeprefix("http://zulip.testserver/")
+        url = url.removeprefix("http://testserver:9080/")
         return url

     def find_pattern(pattern: Any, prefixes: list[str]) -> None:
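Because each removeprefix call leaves the string untouched when the prefix is absent, the cascade of startswith guards collapses into straight-line code with the same result; e.g., for a hypothetical test URL:

    url = "http://zulip.testserver/api/v1/users"
    url = url.removeprefix("/")
    url = url.removeprefix("http://testserver/")
    url = url.removeprefix("http://zulip.testserver/")
    url = url.removeprefix("http://testserver:9080/")
    assert url == "api/v1/users"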
@@ -516,7 +512,7 @@ def write_instrumentation_reports(full_suite: bool, include_webhooks: bool) -> N

         for prefix in prefixes:
             if url.startswith(prefix):
-                match_url = url[len(prefix) :]
+                match_url = url.removeprefix(prefix)
                 if pattern.resolve(match_url):
                     if call["status_code"] in [200, 204, 301, 302]:
                         cnt += 1
@@ -346,7 +346,7 @@ class Runner(DiscoverRunner):
         prefix = "unittest.loader._FailedTest."
         for test_name in get_test_names(suite):
             if test_name.startswith(prefix):
-                test_name = test_name[len(prefix) :]
+                test_name = test_name.removeprefix(prefix)
                 for label in test_labels:
                     # This code block is for when a test label is
                     # directly provided, for example:
@@ -426,7 +426,7 @@ def rewrite_thumbnailed_images(
             # second image, the first image will have no placeholder.
             continue

-        path_id = image_link["href"][len("/user_uploads/") :]
+        path_id = image_link["href"].removeprefix("/user_uploads/")
         if to_delete and path_id in to_delete:
             # This was not a valid thumbnail target, for some reason.
             # Trim out the whole "message_inline_image" element, since
@@ -294,7 +294,7 @@ def get_topic_resolution_and_bare_name(stored_name: str) -> tuple[bool, str]:
     - The topic name with the resolution prefix, if present in stored_name, removed
     """
     if stored_name.startswith(RESOLVED_TOPIC_PREFIX):
-        return (True, stored_name[len(RESOLVED_TOPIC_PREFIX) :])
+        return (True, stored_name.removeprefix(RESOLVED_TOPIC_PREFIX))

     return (False, stored_name)

@@ -239,7 +239,7 @@ class LocalUploadBackend(ZulipUploadBackend):
     def delete_export_tarball(self, export_path: str) -> str | None:
         # Get the last element of a list in the form ['user_avatars', '<file_path>']
         assert export_path.startswith("/")
-        file_path = export_path[1:].split("/", 1)[-1]
+        file_path = export_path.removeprefix("/").split("/", 1)[-1]
         if delete_local_file("avatars", file_path):
             return export_path
         return None
@@ -183,7 +183,7 @@ class S3UploadBackend(ZulipUploadBackend):
         assert split_url.path.endswith(f"/{DUMMY_KEY}")

         return urlunsplit(
-            (split_url.scheme, split_url.netloc, split_url.path[: -len(DUMMY_KEY)], "", "")
+            (split_url.scheme, split_url.netloc, split_url.path.removesuffix(DUMMY_KEY), "", "")
         )

     @override
@@ -395,7 +395,7 @@ class S3UploadBackend(ZulipUploadBackend):
     @override
     def get_export_tarball_url(self, realm: Realm, export_path: str) -> str:
         # export_path has a leading /
-        return self.get_public_upload_url(export_path[1:])
+        return self.get_public_upload_url(export_path.removeprefix("/"))

     @override
     def upload_export_tarball(
@@ -420,7 +420,7 @@ class S3UploadBackend(ZulipUploadBackend):
     @override
     def delete_export_tarball(self, export_path: str) -> str | None:
         assert export_path.startswith("/")
-        path_id = export_path[1:]
+        path_id = export_path.removeprefix("/")
         if self.delete_file_from_s3(path_id, self.avatar_bucket):
             return export_path
         return None
@@ -12,7 +12,7 @@ def get_widget_data(content: str) -> tuple[str | None, Any]:

     # tokens[0] will always exist
     if tokens[0].startswith("/"):
-        widget_type = tokens[0][1:]
+        widget_type = tokens[0].removeprefix("/")
         if widget_type in valid_widget_types:
             remaining_content = content.replace(tokens[0], "", 1)
             extra_data = get_extra_data_from_widget_type(remaining_content, widget_type)
@@ -22,7 +22,7 @@ def process_zcommands(content: str, user_profile: UserProfile) -> dict[str, Any]

     if not content.startswith("/"):
         raise JsonableError(_("There should be a leading slash in the zcommand."))
-    command = content[1:]
+    command = content.removeprefix("/")

     if command == "ping":
         return {}
@@ -14,7 +14,7 @@ def compute_mit_user_fullname(email: str) -> str:
             if hesiod_name != "":
                 return hesiod_name
         elif match_user:
-            return match_user.group(1).lower() + "@" + match_user.group(2).upper()[1:]
+            return match_user.group(1).lower() + "@" + match_user.group(2).upper().removeprefix("|")
     except DNS.Base.ServerError:
         pass
     except Exception:
@@ -32,7 +32,7 @@ def remove_invalid_characters_from_user_group_name(
             continue

         old_group_name = group.name
-        group.name = old_group_name[1:]
+        group.name = old_group_name.removeprefix("@")
         groups_to_update.append(group)

         # Fix the name of non-system groups as well.
@@ -53,7 +53,7 @@ def remove_invalid_characters_from_user_group_name(
             )
             if len(matching_invalid_prefix) == 0:
                 break
-            group_name = group_name[len(matching_invalid_prefix) :]
+            group_name = group_name.removeprefix(matching_invalid_prefix)

         if len(group_name) > 0 and group_name not in existing_group_names_set:
             group.name = group_name
@@ -16,7 +16,7 @@ from zerver.openapi.openapi import validate_against_openapi_schema
 def test_js_bindings(client: Client) -> None:
     os.environ["ZULIP_USERNAME"] = client.email
     os.environ["ZULIP_API_KEY"] = client.api_key
-    os.environ["ZULIP_REALM"] = client.base_url[:-5]
+    os.environ["ZULIP_REALM"] = client.base_url.removesuffix("/api/")

     output = subprocess.check_output(
         args=["node", "--unhandled-rejections=strict", "zerver/openapi/javascript_examples.js"],
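The old slice [:-5] silently relied on client.base_url ending with the five characters "/api/"; removesuffix states that assumption in the code and degrades to a no-op if the format ever changes (illustrative realm URL):

    base_url = "https://example.zulipchat.com/api/"
    assert base_url.removesuffix("/api/") == "https://example.zulipchat.com"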
@@ -160,7 +160,7 @@ def render_python_code_example(
         "```python",
         *config,
         # Remove one level of indentation and strip newlines
-        *(line[4:].rstrip() for snippet in snippets for line in snippet),
+        *(line.removeprefix("    ").rstrip() for snippet in snippets for line in snippet),
         "print(result)",
         "\n",
         "```",
@@ -443,9 +443,9 @@ def validate_test_response(request: Request, response: Response) -> bool:
     """

     if request.path.startswith("/json/"):
-        path = request.path[len("/json") :]
+        path = request.path.removeprefix("/json")
     elif request.path.startswith("/api/v1/"):
-        path = request.path[len("/api/v1") :]
+        path = request.path.removeprefix("/api/v1")
     else:
         return False
     assert request.method is not None
@@ -571,14 +571,14 @@ def validate_test_request(
     assert request.method is not None
     method = request.method.lower()
     if request.path.startswith("/json/"):
-        url = request.path[len("/json") :]
+        url = request.path.removeprefix("/json")
         # Some JSON endpoints have different parameters compared to
         # their `/api/v1` counterparts.
         if (url, method) in SKIP_JSON:
             return
     else:
         assert request.path.startswith("/api/v1/")
-        url = request.path[len("/api/v1") :]
+        url = request.path.removeprefix("/api/v1")

     # TODO: Add support for file upload endpoints that lack the /json/
     # or /api/v1/ prefix.
@@ -71,7 +71,7 @@ class RealmExportTest(ZulipTestCase):
         # Test that the file is hosted, and the contents are as expected.
         export_path = audit_log_entry.extra_data["export_path"]
         assert export_path.startswith("/")
-        path_id = export_path[1:]
+        path_id = export_path.removeprefix("/")
         self.assertEqual(bucket.Object(path_id).get()["Body"].read(), b"zulip!")

         result = self.client_get("/json/export/realm")
@@ -50,12 +50,12 @@ class ThumbnailRedirectEndpointTest(ZulipTestCase):
         base = "/user_uploads/"
         self.assertEqual(base, url[: len(base)])

-        result = self.client_get("/thumbnail", {"url": url[1:], "size": "full"})
+        result = self.client_get("/thumbnail", {"url": url.removeprefix("/"), "size": "full"})
         self.assertEqual(result.status_code, 200)
         self.assertEqual(result.getvalue(), b"zulip!")

         self.login("iago")
-        result = self.client_get("/thumbnail", {"url": url[1:], "size": "full"})
+        result = self.client_get("/thumbnail", {"url": url.removeprefix("/"), "size": "full"})
         self.assertEqual(result.status_code, 403, result)
         self.assert_in_response("You are not authorized to view this file.", result)
@@ -92,7 +92,7 @@ class ThumbnailRedirectEndpointTest(ZulipTestCase):
         self.send_stream_message(self.example_user("hamlet"), "Denmark", body, "test")

         self.logout()
-        response = self.client_get("/thumbnail", {"url": url[1:], "size": "full"})
+        response = self.client_get("/thumbnail", {"url": url.removeprefix("/"), "size": "full"})
         self.assertEqual(response.status_code, 302)
         self.assertTrue(response["Location"].startswith("/accounts/login/?next="))
@@ -104,12 +104,12 @@ class ThumbnailRedirectEndpointTest(ZulipTestCase):
         self.send_stream_message(self.example_user("hamlet"), "web-public-stream", body, "test")

         self.logout()
-        response = self.client_get("/thumbnail", {"url": url[1:], "size": "full"})
+        response = self.client_get("/thumbnail", {"url": url.removeprefix("/"), "size": "full"})
         self.assertEqual(response.status_code, 200)

         # Deny file access since rate limited
         with ratelimit_rule(86400, 0, domain="spectator_attachment_access_by_file"):
-            response = self.client_get("/thumbnail", {"url": url[1:], "size": "full"})
+            response = self.client_get("/thumbnail", {"url": url.removeprefix("/"), "size": "full"})
             self.assertEqual(response.status_code, 302)
             self.assertTrue(response["Location"].startswith("/accounts/login/?next="))
@@ -193,7 +193,7 @@ class S3Test(ZulipTestCase):
         redirect_url = response["Location"]
         path = urlsplit(redirect_url).path
         assert path.startswith("/")
-        key = path[len("/") :]
+        key = path.removeprefix("/")
         self.assertEqual(b"zulip!", bucket.Object(key).get()["Body"].read())

         prefix = f"/internal/s3/{settings.S3_AUTH_UPLOADS_BUCKET}.s3.amazonaws.com/"
@@ -202,7 +202,7 @@ class S3Test(ZulipTestCase):
         redirect_url = response["X-Accel-Redirect"]
         path = urlsplit(redirect_url).path
         assert path.startswith(prefix)
-        key = path[len(prefix) :]
+        key = path.removeprefix(prefix)
         self.assertEqual(b"zulip!", bucket.Object(key).get()["Body"].read())

         # Check the download endpoint
@@ -212,7 +212,7 @@ class S3Test(ZulipTestCase):
         redirect_url = response["X-Accel-Redirect"]
         path = urlsplit(redirect_url).path
         assert path.startswith(prefix)
-        key = path[len(prefix) :]
+        key = path.removeprefix(prefix)
         self.assertEqual(b"zulip!", bucket.Object(key).get()["Body"].read())

         # Now try the endpoint that's supposed to return a temporary URL for access
@@ -232,7 +232,7 @@ class S3Test(ZulipTestCase):
         redirect_url = response["X-Accel-Redirect"]
         path = urlsplit(redirect_url).path
         assert path.startswith(prefix)
-        key = path[len(prefix) :]
+        key = path.removeprefix(prefix)
         self.assertEqual(b"zulip!", bucket.Object(key).get()["Body"].read())

         # The original url shouldn't work when logged out:
@@ -279,7 +279,7 @@ class S3Test(ZulipTestCase):
         self.assertEqual(base, url[: len(base)])

         # Try hitting the equivalent `/user_avatars` endpoint
-        wrong_url = "/user_avatars/" + url[len(base) :]
+        wrong_url = "/user_avatars/" + url.removeprefix(base)
         result = self.client_get(wrong_url)
         self.assertEqual(result.status_code, 301)
         self.assertEqual(result["Location"], url)
@@ -94,9 +94,7 @@ def get_fixtures(request: HttpRequest, *, integration_name: PathOnly[str]) -> Ht
         )

         def fix_name(header: str) -> str:  # nocoverage
-            if header.startswith("HTTP_"):  # HTTP_ is a prefix intended for Django.
-                return header[len("HTTP_") :]
-            return header
+            return header.removeprefix("HTTP_")  # HTTP_ is a prefix intended for Django.

         headers = {fix_name(k): v for k, v in headers_raw.items()}
         fixtures[fixture] = {"body": body, "headers": headers}
@@ -382,7 +382,8 @@ def integration_doc(request: HttpRequest, *, integration_name: PathOnly[str]) ->
     context["integration_display_name"] = integration.display_name
     context["recommended_channel_name"] = integration.stream_name
     if isinstance(integration, WebhookIntegration):
-        context["integration_url"] = integration.url[3:]
+        assert integration.url.startswith("api/")
+        context["integration_url"] = integration.url.removeprefix("api")
         all_event_types = get_all_event_types_for_integration(integration)
         if all_event_types is not None:
             context["all_event_types"] = all_event_types
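This hunk also gains an assert: integration.url[3:] cut three characters no matter what, while removeprefix("api") would silently do nothing for a URL not starting with "api", so the invariant is now checked explicitly. With a hypothetical webhook URL:

    assert "api/v1/external/github".removeprefix("api") == "/v1/external/github"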
@@ -105,8 +105,7 @@ def same_realm_irc_user(user_profile: UserProfile, email: str) -> bool:
         return False

     domain = Address(addr_spec=email).domain.lower()
-    if domain.startswith("irc."):
-        domain = domain[len("irc.") :]
+    domain = domain.removeprefix("irc.")

     # Assumes allow_subdomains=False for all RealmDomain's corresponding to
     # these realms.
@@ -92,9 +92,9 @@ def repo_comment_handler(
     repo_name = payload["repository"]["name"].tame(check_string)
     topic_name = BITBUCKET_TOPIC_TEMPLATE.format(repository_name=repo_name)
     sha = payload["commit"].tame(check_string)
-    commit_url = payload["repository"]["links"]["self"][0]["href"].tame(check_string)[
-        : -len("browse")
-    ]
+    commit_url = (
+        payload["repository"]["links"]["self"][0]["href"].tame(check_string).removesuffix("browse")
+    )
     commit_url += f"commits/{sha}"
     message = payload["comment"]["text"].tame(check_string)
     if action == "deleted their comment":
@@ -26,7 +26,7 @@ def dict_list_to_string(some_list: WildValue) -> str:
         elif item_type and item_url:
             internal_template += f"[{item_type}]({item_url}), "

-    internal_template = internal_template[:-2]
+    internal_template = internal_template.removesuffix(", ")
     return internal_template

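As in the join example earlier, removesuffix(", ") trims exactly the trailing separator and leaves the string alone when none is present, whereas [:-2] would shave two characters off whatever happens to be there; e.g.:

    assert "[a](x), [b](y), ".removesuffix(", ") == "[a](x), [b](y)"
    assert "".removesuffix(", ") == ""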