2017-04-14 00:27:31 +02:00
|
|
|
# Generated by Django 1.10.5 on 2017-04-13 22:12
|
|
|
|
from django.db import migrations
|
2022-05-27 23:33:51 +02:00
|
|
|
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
|
2017-04-14 00:27:31 +02:00
|
|
|
from django.db.migrations.state import StateApps
|
|
|
|
from django.db.models import Count
|
|
|
|
|
2020-01-14 21:59:46 +01:00
|
|
|
|
2022-05-27 23:33:51 +02:00
|
|
|
def fix_duplicate_attachments(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
    """Migration 0041 had a bug, where if multiple messages referenced the
    same attachment, rather than creating a single attachment object
    for all of them, we would incorrectly create one for each message.
    This results in exceptions looking up the Attachment object
    corresponding to a file that was used in multiple messages that
    predate migration 0041.

    This migration fixes this by removing the duplicates, moving their
    messages onto a single canonical Attachment object (per path_id).
    """
    # Use the historical model state, not the live zerver.models.Attachment,
    # so this migration stays valid as the schema evolves.
    Attachment = apps.get_model("zerver", "Attachment")
    # Loop through all groups of Attachment objects with the same `path_id`
    for group in (
        Attachment.objects.values("path_id")
        .annotate(Count("id"))
        # .order_by() clears any default model ordering, which would
        # otherwise leak extra columns into the GROUP BY clause and
        # break the duplicate detection.
        .order_by()
        # Only groups with more than one Attachment row are duplicates.
        .filter(id__count__gt=1)
    ):
        # Sort by the minimum message ID, to find the first attachment
        attachments = sorted(
            Attachment.objects.filter(path_id=group["path_id"]).order_by("id"),
            # NOTE(review): values_list("id")[0] fetches the FIRST message
            # row (a 1-tuple) under the M2M's default ordering, and min() of
            # a 1-tuple is just that element — so this is "id of the first
            # message", which equals the minimum message ID only if messages
            # are ordered by id. Presumably that holds here; verify.
            key=lambda x: min(x.messages.all().values_list("id")[0]),
        )
        # The attachment tied to the oldest message becomes canonical.
        surviving = attachments[0]
        to_cleanup = attachments[1:]
        for a in to_cleanup:
            # For each duplicate attachment, we transfer its messages
            # to the canonical attachment object for that path, and
            # then delete the original attachment.
            for msg in a.messages.all():
                surviving.messages.add(msg)
            # If any duplicate was realm-public, the merged attachment
            # must remain realm-public too.
            surviving.is_realm_public = surviving.is_realm_public or a.is_realm_public
            surviving.save()
            a.delete()
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2017-04-14 00:27:31 +02:00
|
|
|
class Migration(migrations.Migration):
    # Must run after 0073, the most recent schema state this data
    # migration's queries were written against.
    dependencies = [
        ("zerver", "0073_custom_profile_fields"),
    ]

    operations = [
        # elidable=True: this one-off data cleanup can be dropped when
        # migrations are squashed, since fresh databases have no
        # pre-0041 duplicate Attachment rows to repair.
        migrations.RunPython(fix_duplicate_attachments, elidable=True),
    ]
|