2022-12-14 21:51:37 +01:00
|
|
|
import logging
|
|
|
|
import os
|
|
|
|
import secrets
|
2024-07-12 02:30:25 +02:00
|
|
|
from collections.abc import Callable, Iterator
|
2023-02-27 20:55:33 +01:00
|
|
|
from datetime import datetime
|
2024-07-12 02:30:25 +02:00
|
|
|
from typing import IO, Any, BinaryIO, Literal
|
2023-12-05 21:14:17 +01:00
|
|
|
from urllib.parse import urljoin, urlsplit, urlunsplit
|
2022-12-14 21:51:37 +01:00
|
|
|
|
|
|
|
import boto3
|
|
|
|
import botocore
|
|
|
|
from botocore.client import Config
|
|
|
|
from django.conf import settings
|
2024-06-25 21:03:49 +02:00
|
|
|
from mypy_boto3_s3.service_resource import Bucket
|
2023-10-12 19:43:45 +02:00
|
|
|
from typing_extensions import override
|
2022-12-14 21:51:37 +01:00
|
|
|
|
2024-06-25 21:03:49 +02:00
|
|
|
from zerver.lib.thumbnail import resize_avatar, resize_logo
|
2024-06-20 18:11:59 +02:00
|
|
|
from zerver.lib.upload.base import INLINE_MIME_TYPES, ZulipUploadBackend
|
2022-12-14 21:51:37 +01:00
|
|
|
from zerver.models import Realm, RealmEmoji, UserProfile
|
|
|
|
|
|
|
|
# Duration that the signed upload URLs that we redirect to when
# accessing uploaded files are available for clients to fetch before
# they expire.
SIGNED_UPLOAD_URL_DURATION = 60

# Performance note:
#
# For writing files to S3, the file could either be stored in RAM
# (if it is less than 2.5MiB or so) or an actual temporary file on disk.
#
# Because we set FILE_UPLOAD_MAX_MEMORY_SIZE to 0, only the latter case
# should occur in practice.
#
# This is great, because passing the pseudofile object that Django gives
# you to boto would be a pain.

# To come up with a s3 key we randomly generate a "directory". The
# "file name" is the original filename provided by the user run
# through a sanitization function.


# https://github.com/boto/botocore/issues/2644 means that the IMDS
# request _always_ pulls from the environment. Monkey-patch the
# `should_bypass_proxies` function if we need to skip them, based
# on S3_SKIP_PROXY.
if settings.S3_SKIP_PROXY is True:  # nocoverage
    botocore.utils.should_bypass_proxies = lambda url: True
2024-01-03 21:26:23 +01:00
|
|
|
def get_bucket(bucket_name: str, authed: bool = True) -> Bucket:
    """Return a boto3 Bucket handle for ``bucket_name``.

    With ``authed=True`` (the default), the client uses the configured
    S3 credentials.  With ``authed=False``, no credentials are supplied
    and signing is disabled entirely, so URLs generated from the
    returned client are plain public URLs rather than presigned ones.
    """
    if authed:
        access_key = settings.S3_KEY
        secret_key = settings.S3_SECRET_KEY
        # None selects botocore's default signing behavior.
        signature_version = None
    else:
        access_key = None
        secret_key = None
        signature_version = botocore.UNSIGNED

    client_config = Config(
        signature_version=signature_version,
        s3={"addressing_style": settings.S3_ADDRESSING_STYLE},
    )
    resource = boto3.resource(
        "s3",
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        region_name=settings.S3_REGION,
        endpoint_url=settings.S3_ENDPOINT_URL,
        config=client_config,
    )
    return resource.Bucket(bucket_name)
|
def upload_image_to_s3(
    bucket: Bucket,
    file_name: str,
    content_type: str | None,
    user_profile: UserProfile | None,
    contents: bytes,
    *,
    storage_class: Literal[
        "GLACIER_IR",
        "INTELLIGENT_TIERING",
        "ONEZONE_IA",
        "REDUCED_REDUNDANCY",
        "STANDARD",
        "STANDARD_IA",
    ] = "STANDARD",
    cache_control: str | None = None,
    extra_metadata: dict[str, str] | None = None,
) -> None:
    """Write ``contents`` to ``file_name`` in ``bucket`` via PutObject.

    When ``user_profile`` is given, its id and realm id are recorded as
    S3 object metadata.  ``extra_metadata`` entries are merged on top.
    Content types outside INLINE_MIME_TYPES are stored with
    ``Content-Disposition: attachment`` so browsers download rather
    than render them.
    """
    metadata: dict[str, str] = {}
    if user_profile:
        metadata = {
            "user_profile_id": str(user_profile.id),
            "realm_id": str(user_profile.realm_id),
        }
    if extra_metadata is not None:
        metadata.update(extra_metadata)

    if content_type is None:  # nocoverage
        content_type = ""

    optional_args: dict[str, str] = {}
    if content_type not in INLINE_MIME_TYPES:
        # Force a download for anything we would not serve inline.
        optional_args["ContentDisposition"] = "attachment"
    if cache_control is not None:
        optional_args["CacheControl"] = cache_control

    bucket.Object(file_name).put(
        Body=contents,
        Metadata=metadata,
        ContentType=content_type,
        StorageClass=storage_class,
        **optional_args,  # type: ignore[arg-type] # The dynamic kwargs here confuse mypy.
    )
|
def get_signed_upload_url(path: str, force_download: bool = False) -> str:
    """Return a short-lived presigned GET URL for ``path`` in the
    authenticated uploads bucket.

    NOTE (CVE-2023-22735): the Content-Disposition header must come
    from S3 itself; when ``force_download`` is set we override it to
    ``attachment`` via the ResponseContentDisposition request parameter
    rather than guessing from the filename.
    """
    client = get_bucket(settings.S3_AUTH_UPLOADS_BUCKET).meta.client

    request_params = {
        "Bucket": settings.S3_AUTH_UPLOADS_BUCKET,
        "Key": path,
    }
    if force_download:
        request_params["ResponseContentDisposition"] = "attachment"

    return client.generate_presigned_url(
        ClientMethod="get_object",
        Params=request_params,
        ExpiresIn=SIGNED_UPLOAD_URL_DURATION,
        HttpMethod="GET",
    )
class S3UploadBackend(ZulipUploadBackend):
    """Upload backend storing files in S3 (or an S3-compatible service).

    Message attachments live in the private S3_AUTH_UPLOADS_BUCKET;
    avatars, realm icons/logos, emoji, and export tarballs live in the
    S3_AVATAR_BUCKET, which is served via plain public URLs.
    """

    def __init__(self) -> None:
        self.avatar_bucket = get_bucket(settings.S3_AVATAR_BUCKET)
        self.uploads_bucket = get_bucket(settings.S3_AUTH_UPLOADS_BUCKET)
        # Cached prefix for public URLs in the avatar bucket; see
        # construct_public_upload_url_base for why this is precomputed.
        self.public_upload_url_base = self.construct_public_upload_url_base()

    def delete_file_from_s3(self, path_id: str, bucket: Bucket) -> bool:
        """Delete ``path_id`` from ``bucket``.

        Returns True if the object existed and was deleted; returns
        False (after logging a warning) if loading the key failed --
        presumably because it does not exist.
        """
        key = bucket.Object(path_id)

        try:
            key.load()
        except botocore.exceptions.ClientError:
            file_name = path_id.split("/")[-1]
            logging.warning(
                "%s does not exist. Its entry in the database will be removed.", file_name
            )
            return False
        key.delete()
        return True

    def construct_public_upload_url_base(self) -> str:
        """Compute the URL prefix under which avatar-bucket keys are public."""
        # Return the pattern for public URL for a key in the S3 Avatar bucket.
        # For Amazon S3 itself, this will return the following:
        #     f"https://{self.avatar_bucket.name}.{network_location}/{key}"
        #
        # However, we need this function to properly handle S3 style
        # file upload backends that Zulip supports, which can have a
        # different URL format. Configuring no signature and providing
        # no access key makes `generate_presigned_url` just return the
        # normal public URL for a key.
        #
        # It unfortunately takes 2ms per query to call
        # generate_presigned_url. Since we need to potentially compute
        # hundreds of avatar URLs in single `GET /messages` request,
        # we instead back-compute the URL pattern here.

        # The S3_AVATAR_PUBLIC_URL_PREFIX setting is used to override
        # this prefix, for instance if a CloudFront distribution is
        # used.
        if settings.S3_AVATAR_PUBLIC_URL_PREFIX is not None:
            prefix = settings.S3_AVATAR_PUBLIC_URL_PREFIX
            if not prefix.endswith("/"):
                prefix += "/"
            return prefix

        DUMMY_KEY = "dummy_key_ignored"

        # We do not access self.avatar_bucket.meta.client directly,
        # since that client is auth'd, and we want only the direct
        # unauthed endpoint here.
        client = get_bucket(self.avatar_bucket.name, authed=False).meta.client
        dummy_signed_url = client.generate_presigned_url(
            ClientMethod="get_object",
            Params={
                "Bucket": self.avatar_bucket.name,
                "Key": DUMMY_KEY,
            },
            ExpiresIn=0,
        )
        split_url = urlsplit(dummy_signed_url)
        assert split_url.path.endswith(f"/{DUMMY_KEY}")

        # Strip the dummy key, keeping the trailing "/" of the prefix.
        return urlunsplit(
            (split_url.scheme, split_url.netloc, split_url.path[: -len(DUMMY_KEY)], "", "")
        )

    @override
    def get_public_upload_root_url(self) -> str:
        return self.public_upload_url_base

    def get_public_upload_url(
        self,
        key: str,
    ) -> str:
        """Return the public URL for ``key`` in the avatar bucket."""
        assert not key.startswith("/")
        return urljoin(self.public_upload_url_base, key)

    @override
    def generate_message_upload_path(self, realm_id: str, sanitized_file_name: str) -> str:
        # A random URL-safe token serves as the "directory" component,
        # so upload paths are unguessable.
        return "/".join(
            [
                realm_id,
                secrets.token_urlsafe(18),
                sanitized_file_name,
            ]
        )

    @override
    def upload_message_attachment(
        self,
        path_id: str,
        content_type: str,
        file_data: bytes,
        user_profile: UserProfile | None,
    ) -> None:
        """Store a message attachment in the private uploads bucket."""
        upload_image_to_s3(
            self.uploads_bucket,
            path_id,
            content_type,
            user_profile,
            file_data,
            storage_class=settings.S3_UPLOADS_STORAGE_CLASS,
        )

    @override
    def save_attachment_contents(self, path_id: str, filehandle: BinaryIO) -> None:
        """Stream the attachment at ``path_id`` into ``filehandle`` chunk by chunk."""
        for chunk in self.uploads_bucket.Object(path_id).get()["Body"]:
            filehandle.write(chunk)

    @override
    def delete_message_attachment(self, path_id: str) -> bool:
        return self.delete_file_from_s3(path_id, self.uploads_bucket)

    @override
    def delete_message_attachments(self, path_ids: list[str]) -> None:
        # Bulk delete via a single DeleteObjects request.
        self.uploads_bucket.delete_objects(
            Delete={"Objects": [{"Key": path_id} for path_id in path_ids]}
        )

    @override
    def all_message_attachments(
        self, include_thumbnails: bool = False
    ) -> Iterator[tuple[str, datetime]]:
        """Yield (key, last_modified) for every object in the uploads bucket.

        Keys under "thumbnail/" are skipped unless ``include_thumbnails``
        is True.
        """
        client = self.uploads_bucket.meta.client
        paginator = client.get_paginator("list_objects_v2")
        page_iterator = paginator.paginate(Bucket=self.uploads_bucket.name)

        for page in page_iterator:
            if page["KeyCount"] > 0:
                for item in page["Contents"]:
                    if not include_thumbnails and item["Key"].startswith("thumbnail/"):
                        continue
                    yield (
                        item["Key"],
                        item["LastModified"],
                    )

    @override
    def get_avatar_url(self, hash_key: str, medium: bool = False) -> str:
        return self.get_public_upload_url(self.get_avatar_path(hash_key, medium))

    @override
    def get_avatar_contents(self, file_path: str) -> tuple[bytes, str]:
        """Return (image bytes, content type) of the original avatar upload."""
        key = self.avatar_bucket.Object(file_path + ".original")
        image_data = key.get()["Body"].read()
        content_type = key.content_type
        return image_data, content_type

    @override
    def upload_single_avatar_image(
        self,
        file_path: str,
        *,
        user_profile: UserProfile,
        image_data: bytes,
        content_type: str | None,
        future: bool = True,
    ) -> None:
        """Upload one avatar rendition, tagged with its avatar version.

        With ``future=True`` the metadata records the *next* avatar
        version (the version the avatar will have once the change is
        committed).
        """
        extra_metadata = {"avatar_version": str(user_profile.avatar_version + (1 if future else 0))}
        upload_image_to_s3(
            self.avatar_bucket,
            file_path,
            content_type,
            user_profile,
            image_data,
            extra_metadata=extra_metadata,
            # Avatars are content-addressed, so they can be cached forever.
            cache_control="public, max-age=31536000, immutable",
        )

    @override
    def delete_avatar_image(self, path_id: str) -> None:
        # Delete the original plus both resized (medium and standard) renditions.
        self.delete_file_from_s3(path_id + ".original", self.avatar_bucket)
        self.delete_file_from_s3(self.get_avatar_path(path_id, True), self.avatar_bucket)
        self.delete_file_from_s3(self.get_avatar_path(path_id, False), self.avatar_bucket)

    @override
    def get_realm_icon_url(self, realm_id: int, version: int) -> str:
        public_url = self.get_public_upload_url(f"{realm_id}/realm/icon.png")
        # The version query parameter busts browser caches on icon change.
        return public_url + f"?version={version}"

    @override
    def upload_realm_icon_image(
        self, icon_file: IO[bytes], user_profile: UserProfile, content_type: str
    ) -> None:
        """Upload a realm icon: the original plus a resized .png rendition."""
        s3_file_name = os.path.join(self.realm_avatar_and_logo_path(user_profile.realm), "icon")

        image_data = icon_file.read()
        upload_image_to_s3(
            self.avatar_bucket,
            s3_file_name + ".original",
            content_type,
            user_profile,
            image_data,
        )

        resized_data = resize_avatar(image_data)
        upload_image_to_s3(
            self.avatar_bucket,
            s3_file_name + ".png",
            "image/png",
            user_profile,
            resized_data,
        )
        # See avatar_url in avatar.py for URL. (That code also handles the case
        # that users use gravatar.)

    @override
    def get_realm_logo_url(self, realm_id: int, version: int, night: bool) -> str:
        if not night:
            file_name = "logo.png"
        else:
            file_name = "night_logo.png"
        public_url = self.get_public_upload_url(f"{realm_id}/realm/{file_name}")
        # The version query parameter busts browser caches on logo change.
        return public_url + f"?version={version}"

    @override
    def upload_realm_logo_image(
        self, logo_file: IO[bytes], user_profile: UserProfile, night: bool, content_type: str
    ) -> None:
        """Upload a realm (day or night) logo: original plus resized .png."""
        if night:
            basename = "night_logo"
        else:
            basename = "logo"
        s3_file_name = os.path.join(self.realm_avatar_and_logo_path(user_profile.realm), basename)

        image_data = logo_file.read()
        upload_image_to_s3(
            self.avatar_bucket,
            s3_file_name + ".original",
            content_type,
            user_profile,
            image_data,
        )

        resized_data = resize_logo(image_data)
        upload_image_to_s3(
            self.avatar_bucket,
            s3_file_name + ".png",
            "image/png",
            user_profile,
            resized_data,
        )
        # See avatar_url in avatar.py for URL. (That code also handles the case
        # that users use gravatar.)

    @override
    def get_emoji_url(self, emoji_file_name: str, realm_id: int, still: bool = False) -> str:
        """Return the public URL of a realm emoji (or its "still" frame)."""
        if still:
            emoji_path = RealmEmoji.STILL_PATH_ID_TEMPLATE.format(
                realm_id=realm_id,
                emoji_filename_without_extension=os.path.splitext(emoji_file_name)[0],
            )
            return self.get_public_upload_url(emoji_path)
        else:
            emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
                realm_id=realm_id, emoji_file_name=emoji_file_name
            )
            return self.get_public_upload_url(emoji_path)

    @override
    def upload_single_emoji_image(
        self, path: str, content_type: str | None, user_profile: UserProfile, image_data: bytes
    ) -> None:
        upload_image_to_s3(
            self.avatar_bucket,
            path,
            content_type,
            user_profile,
            image_data,
            # Emoji paths include content hashes, so cache forever.
            cache_control="public, max-age=31536000, immutable",
        )

    @override
    def get_export_tarball_url(self, realm: Realm, export_path: str) -> str:
        # export_path has a leading /
        return self.get_public_upload_url(export_path[1:])

    @override
    def upload_export_tarball(
        self,
        realm: Realm | None,
        tarball_path: str,
        percent_callback: Callable[[Any], None] | None = None,
    ) -> str:
        """Upload an export tarball and return its public URL.

        ``percent_callback``, if given, is passed to boto3 as the
        upload progress Callback.
        """
        # We use the avatar bucket, because it's world-readable.
        key = self.avatar_bucket.Object(
            os.path.join("exports", secrets.token_hex(16), os.path.basename(tarball_path))
        )

        if percent_callback is None:
            key.upload_file(Filename=tarball_path)
        else:
            key.upload_file(Filename=tarball_path, Callback=percent_callback)

        public_url = self.get_public_upload_url(key.key)
        return public_url

    @override
    def delete_export_tarball(self, export_path: str) -> str | None:
        """Delete an export tarball; return its path on success, else None."""
        assert export_path.startswith("/")
        path_id = export_path[1:]
        if self.delete_file_from_s3(path_id, self.avatar_bucket):
            return export_path
        return None