import logging
import os
import re
from collections.abc import Iterator
from contextlib import contextmanager
from dataclasses import dataclass
from typing import TypeVar

import pyvips
from bs4 import BeautifulSoup
from bs4.formatter import EntitySubstitution, HTMLFormatter
from django.utils.translation import gettext as _
from typing_extensions import override

from zerver.lib.exceptions import ErrorCode, JsonableError
from zerver.lib.queue import queue_event_on_commit
from zerver.models import AbstractAttachment, ImageAttachment

DEFAULT_AVATAR_SIZE = 100
MEDIUM_AVATAR_SIZE = 500
DEFAULT_EMOJI_SIZE = 64

# We refuse to deal with any image whose total pixel count exceeds this.
IMAGE_BOMB_TOTAL_PIXELS = 90000000

# Reject emoji which, after resizing, have stills larger than this.
MAX_EMOJI_GIF_FILE_SIZE_BYTES = 128 * 1024  # 128 KiB

T = TypeVar("T", bound="BaseThumbnailFormat")


@dataclass(frozen=True)
class BaseThumbnailFormat:
    extension: str
    max_width: int
    max_height: int
    animated: bool

    @override
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, BaseThumbnailFormat):
            return False
        return str(self) == str(other)

    @override
    def __str__(self) -> str:
        animated = "-anim" if self.animated else ""
        return f"{self.max_width}x{self.max_height}{animated}.{self.extension}"

    @classmethod
    def from_string(cls: type[T], format_string: str) -> T | None:
        format_parts = re.match(r"(\d+)x(\d+)(-anim)?\.(\w+)$", format_string)
        if format_parts is None:
            return None

        return cls(
            max_width=int(format_parts[1]),
            max_height=int(format_parts[2]),
            animated=format_parts[3] is not None,
            extension=format_parts[4],
        )
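
# For illustration only (hypothetical values, not part of the module's API):
# a thumbnail format string round-trips through __str__/from_string, which is
# what lets us parse a format back out of a thumbnail URL:
#
#     fmt = BaseThumbnailFormat.from_string("840x560-anim.webp")
#     assert fmt == BaseThumbnailFormat(
#         extension="webp", max_width=840, max_height=560, animated=True
#     )
#     assert str(fmt) == "840x560-anim.webp"
#     assert BaseThumbnailFormat.from_string("not-a-format") is None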


@dataclass(frozen=True, eq=False)
class ThumbnailFormat(BaseThumbnailFormat):
    opts: str | None = ""


# Note that this is serialized into a JSONB column in the database,
# and as such fields cannot be removed without a migration.
@dataclass(frozen=True, eq=False)
class StoredThumbnailFormat(BaseThumbnailFormat):
    content_type: str
    width: int
    height: int
    byte_size: int


# Formats that we generate; the first animated and non-animated
# options on this list are the ones which are written into
# rendered_content.
THUMBNAIL_OUTPUT_FORMATS = [
    # We generate relatively large default "thumbnails", so that
    # clients that do not understand the thumbnailing protocol
    # (e.g. mobile) get something which does not look pixelated. This
    # is also useful when the web client lightbox temporarily shows an
    # upsized thumbnail while loading the full resolution image.
    ThumbnailFormat("webp", 840, 560, animated=True),
    ThumbnailFormat("webp", 840, 560, animated=False),
]
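
# For illustration only (not part of the module): with the list above, the
# entries written into rendered_content stringify as follows, and those
# strings are what appear in thumbnail URLs:
#
#     assert str(THUMBNAIL_OUTPUT_FORMATS[0]) == "840x560-anim.webp"
#     assert str(THUMBNAIL_OUTPUT_FORMATS[1]) == "840x560.webp"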


# These are the image content-types which the server supports parsing
# and thumbnailing; they do not need to be supported on all browsers,
# since we will be serving thumbnailed versions of them. Note that
# this does not provide any *security*, since the content-type is
# provided by the browser, and may not match the bytes they uploaded.
#
# This should be kept synced with the client-side image-picker in
# web/upload_widget.ts. Any additions below must be accompanied by
# changes to the pyvips block below as well.
THUMBNAIL_ACCEPT_IMAGE_TYPES = frozenset(
    [
        "image/avif",
        "image/gif",
        "image/heic",
        "image/jpeg",
        "image/png",
        "image/tiff",
        "image/webp",
    ]
)

# This is what enforces security limitations on which formats are
# parsed; we disable all loaders, then re-enable the ones we support
# -- then explicitly disable any "untrusted" ones, in case libvips for
# some reason marks one of the above formats as such (because they are
# no longer fuzzed, for instance).
#
# Note that only libvips >= 8.13 (Ubuntu 24.04 or later, Debian 12 or
# later) supports this! These are no-ops on earlier versions of libvips.
pyvips.operation_block_set("VipsForeignLoad", True)
pyvips.operation_block_set("VipsForeignLoadHeif", False)  # image/avif, image/heic
pyvips.operation_block_set("VipsForeignLoadNsgif", False)  # image/gif
pyvips.operation_block_set("VipsForeignLoadJpeg", False)  # image/jpeg
pyvips.operation_block_set("VipsForeignLoadPng", False)  # image/png
pyvips.operation_block_set("VipsForeignLoadTiff", False)  # image/tiff
pyvips.operation_block_set("VipsForeignLoadWebp", False)  # image/webp
pyvips.block_untrusted_set(True)

# Disable the operations cache; our only use here is thumbnail_buffer,
# which does not make use of it.
pyvips.voperation.cache_set_max(0)


class BadImageError(JsonableError):
    code = ErrorCode.BAD_IMAGE


@contextmanager
def libvips_check_image(image_data: bytes) -> Iterator[pyvips.Image]:
    # The primary goal of this is to verify that the image is valid,
    # and to raise BadImageError otherwise. The yielded `source_image`
    # may be ignored, since calling `thumbnail_buffer` is faster than
    # calling `thumbnail_image` on a pyvips.Image, as the latter
    # cannot make use of shrink-on-load optimizations:
    # https://www.libvips.org/API/current/libvips-resample.html#vips-thumbnail-image
    try:
        source_image = pyvips.Image.new_from_buffer(image_data, "")
    except pyvips.Error:
        raise BadImageError(_("Could not decode image; did you upload an image file?"))

    if (
        source_image.width * source_image.height * source_image.get_n_pages()
        > IMAGE_BOMB_TOTAL_PIXELS
    ):
        raise BadImageError(_("Image size exceeds limit."))

    try:
        yield source_image
    except pyvips.Error as e:  # nocoverage
        logging.exception(e)
        raise BadImageError(_("Image is corrupted or truncated"))


def resize_avatar(image_data: bytes, size: int = DEFAULT_AVATAR_SIZE) -> bytes:
    # This will scale up, if necessary, and will scale the smallest
    # dimension to fit. That is, a 1x1000 image will end up with the
    # one middle pixel enlarged to fill the full square.
    with libvips_check_image(image_data):
        return pyvips.Image.thumbnail_buffer(
            image_data,
            size,
            height=size,
            crop=pyvips.Interesting.CENTRE,
        ).write_to_buffer(".png")


def resize_logo(image_data: bytes) -> bytes:
    # This will only scale the image down, and will resize it to
    # preserve aspect ratio and be contained within 8*AVATAR by AVATAR
    # pixels; it does not add any padding to make it exactly that
    # size. A 1000x10 pixel image will end up as 800x8; a 10x10 will
    # end up 10x10.
    with libvips_check_image(image_data):
        return pyvips.Image.thumbnail_buffer(
            image_data,
            8 * DEFAULT_AVATAR_SIZE,
            height=DEFAULT_AVATAR_SIZE,
            size=pyvips.Size.DOWN,
        ).write_to_buffer(".png")


def resize_emoji(
    image_data: bytes, emoji_file_name: str, size: int = DEFAULT_EMOJI_SIZE
) -> tuple[bytes, bytes | None]:
    # Square brackets are used for providing options to libvips' save
    # operation; the extension on the filename comes from reversing
    # the content-type, which removes most of the attacker control of
    # this string, but assert it has no bracketed pieces for safety.
    write_file_ext = os.path.splitext(emoji_file_name)[1]
    assert "[" not in write_file_ext

    # This function returns two values:
    # 1) The resized emoji image data.
    # 2) If the emoji is animated, the still image data (i.e. the first
    #    frame); otherwise None.
    with libvips_check_image(image_data) as source_image:
        if source_image.get_n_pages() == 1:
            return (
                pyvips.Image.thumbnail_buffer(
                    image_data,
                    size,
                    height=size,
                    crop=pyvips.Interesting.CENTRE,
                ).write_to_buffer(write_file_ext),
                None,
            )
        first_still = pyvips.Image.thumbnail_buffer(
            image_data,
            size,
            height=size,
            crop=pyvips.Interesting.CENTRE,
        ).write_to_buffer(".png")

        animated = pyvips.Image.thumbnail_buffer(
            image_data,
            size,
            height=size,
            # This is passed to the loader, and means "load all
            # frames", instead of the default of loading just the first.
            option_string="n=-1",
        )
        if animated.width != animated.get("page-height"):
            # If the frames are not square, we have to iterate over
            # them to add padding to make them so.
            if not animated.hasalpha():
                animated = animated.addalpha()
            frames = [
                frame.gravity(
                    pyvips.CompassDirection.CENTRE,
                    size,
                    size,
                    extend=pyvips.Extend.BACKGROUND,
                    background=[0, 0, 0, 0],
                )
                for frame in animated.pagesplit()
            ]
            animated = frames[0].pagejoin(frames[1:])
        return (animated.write_to_buffer(write_file_ext), first_still)
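
# For illustration only (hypothetical bytes and filenames, not part of the
# module): a still PNG yields (resized_bytes, None), while an animated GIF
# yields both the resized animation and a PNG still of its first frame:
#
#     emoji_bytes, still = resize_emoji(png_bytes, "abc123.png")
#     assert still is None
#     gif_bytes, still_png = resize_emoji(gif_bytes, "abc123.gif")
#     assert still_png is not None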


def missing_thumbnails(image_attachment: ImageAttachment) -> list[ThumbnailFormat]:
    seen_thumbnails: set[StoredThumbnailFormat] = set()
    for existing_thumbnail in image_attachment.thumbnail_metadata:
        seen_thumbnails.add(StoredThumbnailFormat(**existing_thumbnail))

    # We use the shared `__eq__` method from BaseThumbnailFormat to
    # compare between the StoredThumbnailFormat values pulled from the
    # database, and the ThumbnailFormat values in
    # THUMBNAIL_OUTPUT_FORMATS.
    needed_thumbnails = [
        thumbnail_format
        for thumbnail_format in THUMBNAIL_OUTPUT_FORMATS
        if thumbnail_format not in seen_thumbnails
    ]

    if image_attachment.frames == 1:
        # We do not generate -anim versions if the source is a still image.
        needed_thumbnails = [
            thumbnail_format
            for thumbnail_format in needed_thumbnails
            if not thumbnail_format.animated
        ]

    return needed_thumbnails
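
# For illustration only (hypothetical row, not part of the module): for a
# freshly-created ImageAttachment with thumbnail_metadata == [] and
# frames == 1, this returns just the non-animated entries of
# THUMBNAIL_OUTPUT_FORMATS:
#
#     missing_thumbnails(row)
#     # -> [ThumbnailFormat("webp", 840, 560, animated=False)]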


def maybe_thumbnail(attachment: AbstractAttachment, content: bytes) -> ImageAttachment | None:
    if attachment.content_type not in THUMBNAIL_ACCEPT_IMAGE_TYPES:
        # If it doesn't self-report as an image file that we might want
        # to thumbnail, don't parse the bytes at all.
        return None
    try:
        # This only attempts to read the header, not the full image content
        with libvips_check_image(content) as image:
            # "original_width_px" and "original_height_px" here are
            # _as rendered_, after applying the orientation
            # information which the image may contain.
            if (
                "orientation" in image.get_fields()
                and image.get("orientation") >= 5
                and image.get("orientation") <= 8
            ):
                (width, height) = (image.height, image.width)
            else:
                (width, height) = (image.width, image.height)

            image_row = ImageAttachment.objects.create(
                realm_id=attachment.realm_id,
                path_id=attachment.path_id,
                original_width_px=width,
                original_height_px=height,
                frames=image.get_n_pages(),
                thumbnail_metadata=[],
            )
            queue_event_on_commit("thumbnail", {"id": image_row.id})
            return image_row
    except BadImageError:
        return None
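
# For illustration only (hypothetical attachments, not part of the module):
# called at upload time; non-image content types are skipped without parsing
# the bytes, and undecodable "images" simply return None:
#
#     maybe_thumbnail(pdf_attachment, pdf_bytes)  # -> None; content-type not accepted
#     maybe_thumbnail(png_attachment, png_bytes)  # -> ImageAttachment row, plus a queued "thumbnail" event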


def get_image_thumbnail_path(
    image_attachment: ImageAttachment,
    thumbnail_format: BaseThumbnailFormat,
) -> str:
    return f"thumbnail/{image_attachment.path_id}/{thumbnail_format!s}"


def split_thumbnail_path(file_path: str) -> tuple[str, BaseThumbnailFormat]:
    assert file_path.startswith("thumbnail/")
    path_parts = file_path.split("/")
    thumbnail_format = BaseThumbnailFormat.from_string(path_parts.pop())
    assert thumbnail_format is not None
    path_id = "/".join(path_parts[1:])
    return path_id, thumbnail_format
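
# For illustration only (hypothetical path_id, not part of the module): the
# two helpers above are inverses of each other:
#
#     path = get_image_thumbnail_path(row, ThumbnailFormat("webp", 840, 560, animated=False))
#     # e.g. "thumbnail/2/ab/example.png/840x560.webp" for path_id "2/ab/example.png"
#     path_id, fmt = split_thumbnail_path(path)
#     assert (path_id, str(fmt)) == (row.path_id, "840x560.webp")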


@dataclass
class MarkdownImageMetadata:
    url: str | None
    is_animated: bool
    original_width_px: int
    original_height_px: int


def get_user_upload_previews(
    realm_id: int,
    content: str,
    lock: bool = False,
    # This function is potentially called twice for every message with
    # spinners (see 6f20c15ae9e5); the second call can pass enqueue=False
    # to avoid enqueueing a duplicate thumbnailing event per spinner.
    enqueue: bool = True,
    path_ids: list[str] | None = None,
) -> dict[str, MarkdownImageMetadata]:
    if path_ids is None:
        path_ids = re.findall(r"/user_uploads/(\d+/[/\w.-]+)", content)
    if not path_ids:
        return {}

    upload_preview_data: dict[str, MarkdownImageMetadata] = {}

    image_attachments = ImageAttachment.objects.filter(
        realm_id=realm_id, path_id__in=path_ids
    ).order_by("id")
    if lock:
        image_attachments = image_attachments.select_for_update()
    for image_attachment in image_attachments:
        if image_attachment.thumbnail_metadata == []:
            # The image exists, and its header parsed as a valid image,
            # but it has not been thumbnailed yet; we will render a
            # spinner.
            upload_preview_data[image_attachment.path_id] = MarkdownImageMetadata(
                url=None,
                is_animated=False,
                original_width_px=image_attachment.original_width_px,
                original_height_px=image_attachment.original_height_px,
            )

            # We re-queue the row for thumbnailing to make sure that
            # we do eventually thumbnail it (e.g. if this is a
            # historical upload from before this system, which we
            # backfilled ImageAttachment rows for); this is a no-op in
            # the worker if all of the currently-configured thumbnail
            # formats have already been generated.
            if enqueue:
                queue_event_on_commit("thumbnail", {"id": image_attachment.id})
        else:
            url, is_animated = get_default_thumbnail_url(image_attachment)
            upload_preview_data[image_attachment.path_id] = MarkdownImageMetadata(
                url=url,
                is_animated=is_animated,
                original_width_px=image_attachment.original_width_px,
                original_height_px=image_attachment.original_height_px,
            )
    return upload_preview_data
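
# For illustration only (hypothetical message content, not part of the
# module): the Markdown pipeline uses this to decide, per upload, whether to
# render a real thumbnail or a loading spinner:
#
#     previews = get_user_upload_previews(realm.id, "[photo](/user_uploads/2/ab/example.png)")
#     # previews maps "2/ab/example.png" to a MarkdownImageMetadata whose url
#     # is None (spinner) until the thumbnailing worker has run.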


def get_default_thumbnail_url(image_attachment: ImageAttachment) -> tuple[str, bool]:
    # For "dumb" clients which cannot rewrite it into their
    # preferred format and size, we choose the first one in
    # THUMBNAIL_OUTPUT_FORMATS which matches the animated/not
    # nature of the source image.
    found_format: ThumbnailFormat | None = None
    for thumbnail_format in THUMBNAIL_OUTPUT_FORMATS:
        if thumbnail_format.animated == (image_attachment.frames > 1):
            found_format = thumbnail_format
            break
    if found_format is None:
        # No animated thumbnail formats exist somehow, and the
        # image is animated? Just take the first thumbnail
        # format.
        found_format = THUMBNAIL_OUTPUT_FORMATS[0]
    return (
        "/user_uploads/" + get_image_thumbnail_path(image_attachment, found_format),
        found_format.animated,
    )
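
# For illustration only (hypothetical row, not part of the module): for a
# still image (frames == 1) with path_id "2/ab/example.png", the first
# non-animated entry of THUMBNAIL_OUTPUT_FORMATS is chosen:
#
#     get_default_thumbnail_url(row)
#     # -> ("/user_uploads/thumbnail/2/ab/example.png/840x560.webp", False)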


# Like HTMLFormatter.REGISTRY["html5"], this formatter avoids producing
# self-closing tags, but it differs by avoiding unnecessary escaping with
# HTML5-specific entities that cannot be parsed by lxml and libxml2
# (https://bugs.launchpad.net/lxml/+bug/2031045).
html_formatter = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml,  # not substitute_html
    void_element_close_prefix="",
    empty_attributes_are_booleans=True,
)


def rewrite_thumbnailed_images(
    rendered_content: str,
    images: dict[str, MarkdownImageMetadata],
    to_delete: set[str] | None = None,
) -> tuple[str | None, set[str]]:
    if not images and not to_delete:
        return None, set()

    remaining_thumbnails: set[str] = set()
    parsed_message = BeautifulSoup(rendered_content, "html.parser")

    changed = False
    for inline_image_div in parsed_message.find_all("div", class_="message_inline_image"):
        image_link = inline_image_div.find("a")
        if (
            image_link is None
            or image_link["href"] is None
            or not image_link["href"].startswith("/user_uploads/")
        ):
            # This is not an inline image generated by the markdown
            # processor for a locally-uploaded image.
            continue
        image_tag = image_link.find("img", class_="image-loading-placeholder")
        if image_tag is None:
            # The placeholder was already replaced -- for instance,
            # this is expected if multiple images are included in the
            # same message. The second time this is run, for the
            # second image, the first image will have no placeholder.
            continue

        path_id = image_link["href"].removeprefix("/user_uploads/")
        if to_delete and path_id in to_delete:
            # This was not a valid thumbnail target, for some reason.
            # Trim out the whole "message_inline_image" element, since
            # it's not going to be renderable by clients either.
            inline_image_div.decompose()
            changed = True
            continue

        image_data = images.get(path_id)
        if image_data is None:
            # The message has multiple images, and we're updating just
            # one image, and it's not this one. Leave this one as-is.
            remaining_thumbnails.add(path_id)
        elif image_data.url is None:
            # We're re-rendering the whole message, so we fetched all of
            # the image metadata rows; this is one of the images we care
            # about, but it is not thumbnailed yet.
            remaining_thumbnails.add(path_id)
        else:
            changed = True
            del image_tag["class"]
            image_tag["src"] = image_data.url
            image_tag["data-original-dimensions"] = (
                f"{image_data.original_width_px}x{image_data.original_height_px}"
            )
            if image_data.is_animated:
                image_tag["data-animated"] = "true"

    if changed:
        return parsed_message.encode(
            formatter=html_formatter
        ).decode().strip(), remaining_thumbnails
    else:
        return None, remaining_thumbnails
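
# For illustration only (hypothetical HTML, not part of the module): given a
# rendered placeholder like
#
#     <div class="message_inline_image"><a href="/user_uploads/2/ab/example.png">
#         <img class="image-loading-placeholder" src="..."></a></div>
#
# and an images entry for "2/ab/example.png" with a non-None url, the img tag
# is rewritten in place: the placeholder class is dropped, src points at the
# thumbnail URL, and data-original-dimensions (plus data-animated for animated
# sources) is added. The first return value is the updated HTML (or None if
# nothing changed), and the second is the set of path_ids still awaiting
# thumbnails.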