import collections
import logging
import os
import shutil
from concurrent.futures import ProcessPoolExecutor, as_completed
from datetime import datetime, timezone
from typing import Any

import bmemcached
import orjson
import pyvips
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.cache import cache
from django.core.validators import validate_email
from django.db import connection, transaction
from django.db.backends.utils import CursorWrapper
from django.utils.timezone import now as timezone_now
from psycopg2.extras import execute_values
from psycopg2.sql import SQL, Identifier

from analytics.models import RealmCount, StreamCount, UserCount
from zerver.actions.create_realm import set_default_for_realm_permission_group_settings
from zerver.actions.realm_settings import do_change_realm_plan_type
from zerver.actions.user_settings import do_change_avatar_fields
from zerver.lib.avatar_hash import user_avatar_base_path_from_ids
from zerver.lib.bulk_create import bulk_set_users_or_streams_recipient_fields
from zerver.lib.export import DATE_FIELDS, Field, Path, Record, TableData, TableName
from zerver.lib.markdown import markdown_convert
from zerver.lib.markdown import version as markdown_version
from zerver.lib.message import get_last_message_id
from zerver.lib.mime_types import guess_type
from zerver.lib.partial import partial
from zerver.lib.push_notifications import sends_notifications_directly
from zerver.lib.remote_server import maybe_enqueue_audit_log_upload
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.streams import render_stream_description
from zerver.lib.thumbnail import THUMBNAIL_ACCEPT_IMAGE_TYPES, BadImageError, maybe_thumbnail
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.upload import ensure_avatar_image, sanitize_name, upload_backend, upload_emoji_image
from zerver.lib.upload.s3 import get_bucket
from zerver.lib.user_counts import realm_user_count_by_role
from zerver.lib.user_groups import create_system_user_groups_for_realm
from zerver.lib.user_message import UserMessageLite, bulk_insert_ums
from zerver.lib.utils import generate_api_key, process_list_in_batches
from zerver.lib.zulip_update_announcements import send_zulip_update_announcements_to_realm
from zerver.models import (
    AlertWord,
    Attachment,
    BotConfigData,
    BotStorageData,
    Client,
    CustomProfileField,
    CustomProfileFieldValue,
    DefaultStream,
    DirectMessageGroup,
    GroupGroupMembership,
    Message,
    MutedUser,
    NamedUserGroup,
    OnboardingStep,
    OnboardingUserMessage,
    Reaction,
    Realm,
    RealmAuditLog,
    RealmAuthenticationMethod,
    RealmDomain,
    RealmEmoji,
    RealmFilter,
    RealmPlayground,
    RealmUserDefault,
    Recipient,
    SavedSnippet,
    ScheduledMessage,
    Service,
    Stream,
    Subscription,
    UserActivity,
    UserActivityInterval,
    UserGroup,
    UserGroupMembership,
    UserMessage,
    UserPresence,
    UserProfile,
    UserStatus,
    UserTopic,
)
from zerver.models.groups import SystemGroups
from zerver.models.presence import PresenceSequence
from zerver.models.realm_audit_logs import AuditLogEventType
from zerver.models.realms import get_realm
from zerver.models.recipients import get_direct_message_group_hash
from zerver.models.users import get_system_bot, get_user_profile_by_id
from zproject.backends import AUTH_BACKEND_NAME_MAP

realm_tables = [
    ("zerver_realmauthenticationmethod", RealmAuthenticationMethod, "realmauthenticationmethod"),
    ("zerver_defaultstream", DefaultStream, "defaultstream"),
    ("zerver_realmemoji", RealmEmoji, "realmemoji"),
    ("zerver_realmdomain", RealmDomain, "realmdomain"),
    ("zerver_realmfilter", RealmFilter, "realmfilter"),
    ("zerver_realmplayground", RealmPlayground, "realmplayground"),
]  # List[Tuple[TableName, Any, str]]


# ID_MAP is a dictionary that maps table names to dictionaries
# that map old ids to new ids. We use this in
# re_map_foreign_keys and other places.
#
# We explicitly initialize ID_MAP with the tables that support
# id re-mapping.
#
# Code reviewers: give these tables extra scrutiny, as we need to
# make sure to reload related tables AFTER we re-map the ids.
ID_MAP: dict[str, dict[int, int]] = {
    "alertword": {},
    "client": {},
    "user_profile": {},
    "huddle": {},
    "realm": {},
    "stream": {},
    "recipient": {},
    "subscription": {},
    "defaultstream": {},
    "onboardingstep": {},
    "presencesequence": {},
    "reaction": {},
    "realmauthenticationmethod": {},
    "realmemoji": {},
    "realmdomain": {},
    "realmfilter": {},
    "realmplayground": {},
    "message": {},
    "user_presence": {},
    "userstatus": {},
    "useractivity": {},
    "useractivityinterval": {},
    "usermessage": {},
    "customprofilefield": {},
    "customprofilefieldvalue": {},
    "attachment": {},
    "realmauditlog": {},
    "recipient_to_huddle_map": {},
    "usertopic": {},
    "muteduser": {},
    "service": {},
    "usergroup": {},
    "usergroupmembership": {},
    "groupgroupmembership": {},
    "botstoragedata": {},
    "botconfigdata": {},
    "analytics_realmcount": {},
    "analytics_streamcount": {},
    "analytics_usercount": {},
    "realmuserdefault": {},
    "scheduledmessage": {},
    "onboardingusermessage": {},
}
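
# Illustrative example (hypothetical ids, not taken from a real import): after
# update_model_ids(UserProfile, data, "user_profile") has allocated new ids,
# ID_MAP["user_profile"] might look like {9: 1309, 10: 1310}, meaning the
# exported user with id 9 was assigned id 1309 in this server's database.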

id_map_to_list: dict[str, dict[int, list[int]]] = {
    "huddle_to_user_list": {},
}

path_maps: dict[str, dict[str, str]] = {
    # Maps original attachment path pre-import to the final, post-import
    # attachment path.
    "old_attachment_path_to_new_path": {},
    # Inverse of old_attachment_path_to_new_path.
    "new_attachment_path_to_old_path": {},
    # Maps the new (post-import) attachment path to the absolute path to the file
    # in the on-disk export data that we're importing.
    # Allows code running after this is filled to access file contents
    # without needing to go through S3 to get it.
    "new_attachment_path_to_local_data_path": {},
}
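
# Illustrative example (hypothetical paths, assumed layout): an exported
# attachment path such as "2/ab/cdef/screenshot.png" may end up as
# "7/ab/cdef/screenshot.png" after import, with the newly allocated realm id in
# the prefix; path_maps records both directions of that mapping, plus where the
# file lives in the on-disk export data.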

message_id_to_attachments: dict[str, dict[int, list[str]]] = {
    "zerver_message": collections.defaultdict(list),
    "zerver_scheduledmessage": collections.defaultdict(list),
}


def map_messages_to_attachments(data: TableData) -> None:
    for attachment in data["zerver_attachment"]:
        for message_id in attachment["messages"]:
            message_id_to_attachments["zerver_message"][message_id].append(attachment["path_id"])

        for scheduled_message_id in attachment["scheduled_messages"]:
            message_id_to_attachments["zerver_scheduledmessage"][scheduled_message_id].append(
                attachment["path_id"]
            )


def update_id_map(table: TableName, old_id: int, new_id: int) -> None:
    if table not in ID_MAP:
        raise Exception(
            f"""
            Table {table} is not initialized in ID_MAP, which could
            mean that we have not thought through circular
            dependencies.
            """
        )
    ID_MAP[table][old_id] = new_id


def fix_datetime_fields(data: TableData, table: TableName) -> None:
    for item in data[table]:
        for field_name in DATE_FIELDS[table]:
            if item[field_name] is not None:
                item[field_name] = datetime.fromtimestamp(item[field_name], tz=timezone.utc)
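

# Illustrative example for fix_datetime_fields (assumed data): an exported
# POSIX timestamp like 1700000000.0 becomes
# datetime(2023, 11, 14, 22, 13, 20, tzinfo=timezone.utc).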


def fix_upload_links(data: TableData, message_table: TableName) -> None:
    """
    Because the URLs for uploaded files encode the realm ID of the
    organization being imported (which is only determined at import
    time), we need to rewrite the URLs of links to uploaded files
    during the import process.

    Applied to the attachment path_ids found in messages in the
    zerver_message and zerver_scheduledmessage tables.
    """
    for message in data[message_table]:
        if message["has_attachment"] is True:
            for attachment_path in message_id_to_attachments[message_table][message["id"]]:
                old_path = path_maps["new_attachment_path_to_old_path"][attachment_path]
                message["content"] = message["content"].replace(old_path, attachment_path)

                if message["rendered_content"]:
                    message["rendered_content"] = message["rendered_content"].replace(
                        old_path, attachment_path
                    )


def fix_streams_can_remove_subscribers_group_column(data: TableData, realm: Realm) -> None:
    table = get_db_table(Stream)
    admins_group = NamedUserGroup.objects.get(
        name=SystemGroups.ADMINISTRATORS, realm=realm, is_system_group=True
    )
    for stream in data[table]:
        stream["can_remove_subscribers_group"] = admins_group


def create_subscription_events(data: TableData, realm_id: int) -> None:
    """
    When the export data doesn't contain the table `zerver_realmauditlog`,
    this function creates RealmAuditLog objects for the `subscription_created`
    event type for all the existing Stream subscriptions.

    This is needed for all the export tools which do not include the
    table `zerver_realmauditlog` (e.g. Slack), because the appropriate
    data about when a user was subscribed is not exported by the third-party
    service.
    """
    all_subscription_logs = []

    event_last_message_id = get_last_message_id()
    event_time = timezone_now()

    recipient_id_to_stream_id = {
        d["id"]: d["type_id"] for d in data["zerver_recipient"] if d["type"] == Recipient.STREAM
    }

    for sub in data["zerver_subscription"]:
        recipient_id = sub["recipient_id"]
        stream_id = recipient_id_to_stream_id.get(recipient_id)

        if stream_id is None:
            continue

        user_id = sub["user_profile_id"]

        all_subscription_logs.append(
            RealmAuditLog(
                realm_id=realm_id,
                acting_user_id=user_id,
                modified_user_id=user_id,
                modified_stream_id=stream_id,
                event_last_message_id=event_last_message_id,
                event_time=event_time,
                event_type=AuditLogEventType.SUBSCRIPTION_CREATED,
            )
        )
    RealmAuditLog.objects.bulk_create(all_subscription_logs)


def fix_service_tokens(data: TableData, table: TableName) -> None:
    """
    The tokens in the services are created by 'generate_api_key'.
    As the tokens are unique, they should be re-created for the imports.
    """
    for item in data[table]:
        item["token"] = generate_api_key()


def process_direct_message_group_hash(data: TableData, table: TableName) -> None:
    """
    Build new direct message group hashes with the updated ids of the users
    """
    for direct_message_group in data[table]:
        user_id_list = id_map_to_list["huddle_to_user_list"][direct_message_group["id"]]
        direct_message_group["huddle_hash"] = get_direct_message_group_hash(user_id_list)


def get_direct_message_groups_from_subscription(data: TableData, table: TableName) -> None:
    """
    Extract the IDs of the user_profiles involved in a direct message group from
    the subscription object.
    This helps to generate a unique direct message group hash from the updated
    user_profile ids.
    """
    id_map_to_list["huddle_to_user_list"] = {
        value: [] for value in ID_MAP["recipient_to_huddle_map"].values()
    }

    for subscription in data[table]:
        if subscription["recipient"] in ID_MAP["recipient_to_huddle_map"]:
            direct_message_group_id = ID_MAP["recipient_to_huddle_map"][subscription["recipient"]]
            id_map_to_list["huddle_to_user_list"][direct_message_group_id].append(
                subscription["user_profile_id"]
            )


def fix_customprofilefield(data: TableData) -> None:
    """
    In CustomProfileField with 'field_type' like 'USER', the IDs need to be
    re-mapped.
    """
    field_type_USER_ids = {
        item["id"]
        for item in data["zerver_customprofilefield"]
        if item["field_type"] == CustomProfileField.USER
    }

    for item in data["zerver_customprofilefieldvalue"]:
        if item["field_id"] in field_type_USER_ids:
            old_user_id_list = orjson.loads(item["value"])

            new_id_list = re_map_foreign_keys_many_to_many_internal(
                table="zerver_customprofilefieldvalue",
                field_name="value",
                related_table="user_profile",
                old_id_list=old_user_id_list,
            )
            item["value"] = orjson.dumps(new_id_list).decode()


def fix_message_rendered_content(
    realm: Realm,
    sender_map: dict[int, Record],
    messages: list[Record],
    content_key: str = "content",
    rendered_content_key: str = "rendered_content",
) -> None:
    """
    This function sets the rendered_content of the messages we're importing.
    """
    for message in messages:
        if content_key not in message:
            # Message-edit entries include topic moves, which don't
            # have any content changes to process.
            continue

        if message[rendered_content_key] is not None:
            # For Zulip->Zulip imports, we use the original rendered
            # Markdown; this avoids issues where e.g. a mention can no
            # longer render properly because a user has changed their
            # name.
            #
            # However, we still need to update the data-user-id and
            # similar values stored on mentions, stream mentions, and
            # similar syntax in the rendered HTML.
            soup = BeautifulSoup(message[rendered_content_key], "html.parser")

            user_mentions = soup.findAll("span", {"class": "user-mention"})
            if len(user_mentions) != 0:
                user_id_map = ID_MAP["user_profile"]
                for mention in user_mentions:
                    if not mention.has_attr("data-user-id"):
                        # Legacy mentions don't have a data-user-id
                        # field; we should just import them
                        # unmodified.
                        continue
                    if mention["data-user-id"] == "*":
                        # No rewriting is required for wildcard mentions
                        continue
                    old_user_id = int(mention["data-user-id"])
                    if old_user_id in user_id_map:
                        mention["data-user-id"] = str(user_id_map[old_user_id])
                message[rendered_content_key] = str(soup)

            stream_mentions = soup.findAll("a", {"class": "stream"})
            if len(stream_mentions) != 0:
                stream_id_map = ID_MAP["stream"]
                for mention in stream_mentions:
                    old_stream_id = int(mention["data-stream-id"])
                    if old_stream_id in stream_id_map:
                        mention["data-stream-id"] = str(stream_id_map[old_stream_id])
                message[rendered_content_key] = str(soup)

            user_group_mentions = soup.findAll("span", {"class": "user-group-mention"})
            if len(user_group_mentions) != 0:
                user_group_id_map = ID_MAP["usergroup"]
                for mention in user_group_mentions:
                    old_user_group_id = int(mention["data-user-group-id"])
                    if old_user_group_id in user_group_id_map:
                        mention["data-user-group-id"] = str(user_group_id_map[old_user_group_id])
                message[rendered_content_key] = str(soup)
            continue

        try:
            content = message[content_key]

            sender_id = message["sender_id"]
            sender = sender_map[sender_id]
            sent_by_bot = sender["is_bot"]
            translate_emoticons = sender["translate_emoticons"]

            # We don't handle alert words on import from third-party
            # platforms, since they generally don't have an "alert
            # words" type feature, and notifications aren't important anyway.
            realm_alert_words_automaton = None

            rendered_content = markdown_convert(
                content=content,
                realm_alert_words_automaton=realm_alert_words_automaton,
                message_realm=realm,
                sent_by_bot=sent_by_bot,
                translate_emoticons=translate_emoticons,
            ).rendered_content

            message[rendered_content_key] = rendered_content
            if "scheduled_timestamp" not in message:
                # This logic also runs for ScheduledMessage, which doesn't use
                # the rendered_content_version field.
                message["rendered_content_version"] = markdown_version
        except Exception:
            # This generally happens with two possible causes:
            # * rendering Markdown throwing an uncaught exception
            # * rendering Markdown failing with the exception being
            #   caught in Markdown (which then returns None, causing the
            #   rendered_content assert above to fire).
            logging.warning(
                "Error in Markdown rendering for message ID %s; continuing", message["id"]
            )


def fix_message_edit_history(
    realm: Realm, sender_map: dict[int, Record], messages: list[Record]
) -> None:
    user_id_map = ID_MAP["user_profile"]
    for message in messages:
        edit_history_json = message.get("edit_history")
        if not edit_history_json:
            continue

        edit_history = orjson.loads(edit_history_json)
        for edit_history_message_dict in edit_history:
            edit_history_message_dict["user_id"] = user_id_map[edit_history_message_dict["user_id"]]

        fix_message_rendered_content(
            realm,
            sender_map,
            messages=edit_history,
            content_key="prev_content",
            rendered_content_key="prev_rendered_content",
        )

        message["edit_history"] = orjson.dumps(edit_history).decode()


def current_table_ids(data: TableData, table: TableName) -> list[int]:
    """
    Returns the ids present in the current table
    """
    return [item["id"] for item in data[table]]


def idseq(model_class: Any, cursor: CursorWrapper) -> str:
    sequences = connection.introspection.get_sequences(cursor, model_class._meta.db_table)
    for sequence in sequences:
        if sequence["column"] == "id":
            return sequence["name"]
    raise Exception(f"No sequence found for 'id' of {model_class}")
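

# Illustrative example for idseq (assumed default PostgreSQL sequence naming):
# for the Message model this would typically return "zerver_message_id_seq".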


def allocate_ids(model_class: Any, count: int) -> list[int]:
    """
    Increases the sequence number for a given table by the number of objects being
    imported into that table. Hence, this gives a reserved range of IDs to import the
    converted Slack objects into the tables.
    """
    with connection.cursor() as cursor:
        sequence = idseq(model_class, cursor)
        cursor.execute("select nextval(%s) from generate_series(1, %s)", [sequence, count])
        query = cursor.fetchall()  # Each element in the result is a tuple like (5,)

    # convert List[Tuple[int]] to List[int]
    return [item[0] for item in query]
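

# Illustrative example for allocate_ids (hypothetical values):
# allocate_ids(Client, 3) advances the id sequence behind zerver_client by 3
# and might return [15, 16, 17], ids that are now reserved for the rows being
# imported.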


def convert_to_id_fields(data: TableData, table: TableName, field_name: Field) -> None:
    """
    When Django gives us dict objects via model_to_dict, the foreign
    key fields are `foo`, but we want `foo_id` for the bulk insert.
    This function handles the simple case where we simply rename
    the fields. For cases where we need to munge ids in the
    database, see re_map_foreign_keys.
    """
    for item in data[table]:
        item[field_name + "_id"] = item[field_name]
        del item[field_name]
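

# Illustrative example for convert_to_id_fields: a row like
# {"id": 1, "realm": 17} becomes {"id": 1, "realm_id": 17} when called with
# field_name="realm".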


def re_map_foreign_keys(
    data: TableData,
    table: TableName,
    field_name: Field,
    related_table: TableName,
    verbose: bool = False,
    id_field: bool = False,
    recipient_field: bool = False,
) -> None:
    """
    This is a wrapper function for all the realm data tables; only avatar and
    attachment records need to be passed through the internal function directly,
    because of the difference in data format (TableData corresponding to realm
    data tables and List[Record] corresponding to the avatar and attachment
    records).
    """

    # See comments in bulk_import_user_message_data.
    assert related_table != "usermessage"

    re_map_foreign_keys_internal(
        data[table],
        table,
        field_name,
        related_table,
        verbose,
        id_field,
        recipient_field,
    )


def re_map_foreign_keys_internal(
    data_table: list[Record],
    table: TableName,
    field_name: Field,
    related_table: TableName,
    verbose: bool = False,
    id_field: bool = False,
    recipient_field: bool = False,
) -> None:
    """
    We occasionally need to assign new ids to rows during the
    import/export process, to accommodate things like existing rows
    already being in tables. See bulk_import_client for more context.

    The tricky part is making sure that foreign key references
    are in sync with the new ids, and this fixer function does
    the re-mapping. (It also appends `_id` to the field.)
    """
    lookup_table = ID_MAP[related_table]
    for item in data_table:
        old_id = item[field_name]
        if recipient_field:
            if related_table == "stream" and item["type"] == 2:
                pass
            elif related_table == "user_profile" and item["type"] == 1:
                pass
            elif related_table == "huddle" and item["type"] == 3:
                # save the recipient id with the direct message group id, so that
                # we can extract the user_profile ids involved in a direct message
                # group with the help of the subscription object
                # check function 'get_direct_message_groups_from_subscription'
                ID_MAP["recipient_to_huddle_map"][item["id"]] = lookup_table[old_id]
            else:
                continue
        old_id = item[field_name]
        if old_id in lookup_table:
            new_id = lookup_table[old_id]
            if verbose:
                logging.info(
                    "Remapping %s %s from %s to %s", table, field_name + "_id", old_id, new_id
                )
        else:
            new_id = old_id
        if not id_field:
            item[field_name + "_id"] = new_id
            del item[field_name]
        else:
            item[field_name] = new_id


def re_map_realm_emoji_codes(data: TableData, *, table_name: str) -> None:
    """
    Some tables, including Reaction and UserStatus, contain a form of
    foreign key reference to the RealmEmoji table in the form of
    `str(realm_emoji.id)` when `reaction_type="realm_emoji"`.

    See the block comment for emoji_code in the AbstractEmoji
    definition for more details.
    """
    realm_emoji_dct = {}

    for row in data["zerver_realmemoji"]:
        realm_emoji_dct[row["id"]] = row

    for row in data[table_name]:
        if row["reaction_type"] == Reaction.REALM_EMOJI:
            old_realm_emoji_id = int(row["emoji_code"])

            # Fail hard here if we didn't map correctly.
            new_realm_emoji_id = ID_MAP["realmemoji"][old_realm_emoji_id]

            # This is a very important sanity check.
            realm_emoji_row = realm_emoji_dct[new_realm_emoji_id]
            assert realm_emoji_row["name"] == row["emoji_name"]

            # Now update emoji_code to the new id.
            row["emoji_code"] = str(new_realm_emoji_id)


def re_map_foreign_keys_many_to_many(
    data: TableData,
    table: TableName,
    field_name: Field,
    related_table: TableName,
    verbose: bool = False,
) -> None:
    """
    We need to assign new ids to rows during the import/export
    process.

    The tricky part is making sure that foreign key references
    are in sync with the new ids, and this wrapper function does
    the re-mapping only for ManyToMany fields.
    """
    for item in data[table]:
        old_id_list = item[field_name]
        new_id_list = re_map_foreign_keys_many_to_many_internal(
            table, field_name, related_table, old_id_list, verbose
        )
        item[field_name] = new_id_list
        del item[field_name]


def re_map_foreign_keys_many_to_many_internal(
    table: TableName,
    field_name: Field,
    related_table: TableName,
    old_id_list: list[int],
    verbose: bool = False,
) -> list[int]:
    """
    This is an internal function for tables with ManyToMany fields,
    which takes the old ID list of the ManyToMany relation and returns the
    new updated ID list.
    """
    lookup_table = ID_MAP[related_table]
    new_id_list = []
    for old_id in old_id_list:
        if old_id in lookup_table:
            new_id = lookup_table[old_id]
            if verbose:
                logging.info(
                    "Remapping %s %s from %s to %s", table, field_name + "_id", old_id, new_id
                )
        else:
            new_id = old_id
        new_id_list.append(new_id)
    return new_id_list
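

# Illustrative example for re_map_foreign_keys_many_to_many_internal
# (hypothetical ids): with a lookup table of {4: 104} for the related table,
# an old_id_list of [4, 9] is remapped to [104, 9]; ids missing from the
# lookup table are kept unchanged.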


def fix_bitfield_keys(data: TableData, table: TableName, field_name: Field) -> None:
    for item in data[table]:
        item[field_name] = item[field_name + "_mask"]
        del item[field_name + "_mask"]
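

# Illustrative example for fix_bitfield_keys: called with field_name="flags",
# a row containing {"flags_mask": 1} is rewritten to contain {"flags": 1}.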


def remove_denormalized_recipient_column_from_data(data: TableData) -> None:
    """
    The recipient column shouldn't be imported; we'll set the correct values
    when the Recipient table gets imported.
    """
    for stream_dict in data["zerver_stream"]:
        if "recipient" in stream_dict:
            del stream_dict["recipient"]

    for user_profile_dict in data["zerver_userprofile"]:
        if "recipient" in user_profile_dict:
            del user_profile_dict["recipient"]

    for direct_message_group_dict in data["zerver_huddle"]:
        if "recipient" in direct_message_group_dict:
            del direct_message_group_dict["recipient"]
|
2020-03-15 19:05:27 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2018-07-26 22:45:12 +02:00
|
|
|
def get_db_table(model_class: Any) -> str:
|
|
|
|
"""E.g. (RealmDomain -> 'zerver_realmdomain')"""
|
|
|
|
return model_class._meta.db_table
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2018-07-26 22:45:12 +02:00
|
|
|
def update_model_ids(model: Any, data: TableData, related_table: TableName) -> None:
|
|
|
|
table = get_db_table(model)
|
2018-11-05 14:45:10 +01:00
|
|
|
|
|
|
|
# Important: remapping usermessage rows is
|
2022-02-08 00:13:33 +01:00
|
|
|
# not only unnecessary, it's expensive and can cause
|
2018-11-05 14:45:10 +01:00
|
|
|
# memory errors. We don't even use ids from ID_MAP.
|
2024-07-16 11:30:18 +02:00
|
|
|
assert table != "usermessage"
|
2018-11-05 14:45:10 +01:00
|
|
|
|
2018-04-23 23:28:27 +02:00
|
|
|
old_id_list = current_table_ids(data, table)
|
|
|
|
allocated_id_list = allocate_ids(model, len(data[table]))
|
|
|
|
for item in range(len(data[table])):
|
|
|
|
update_id_map(related_table, old_id_list[item], allocated_id_list[item])
|
2021-02-12 08:20:45 +01:00
|
|
|
re_map_foreign_keys(data, table, "id", related_table=related_table, id_field=True)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2018-10-12 23:42:17 +02:00
|
|
|
def bulk_import_user_message_data(data: TableData, dump_file_id: int) -> None:
|
|
|
|
model = UserMessage
|
2021-02-12 08:20:45 +01:00
|
|
|
table = "zerver_usermessage"
|
2018-10-12 23:42:17 +02:00
|
|
|
lst = data[table]
|
|
|
|
|
2018-11-05 14:45:10 +01:00
|
|
|
# IMPORTANT NOTE: We do not use any primary id
|
|
|
|
# data from either the import itself or ID_MAP.
|
|
|
|
# We let the DB itself generate ids. Note that
|
|
|
|
# no tables use user_message.id as a foreign key,
|
|
|
|
# so we can safely avoid all re-mapping complexity.
|
|
|
|
|
2024-07-12 02:30:17 +02:00
|
|
|
def process_batch(items: list[dict[str, Any]]) -> None:
|
2018-10-12 23:42:17 +02:00
|
|
|
ums = [
|
|
|
|
UserMessageLite(
|
2021-02-12 08:20:45 +01:00
|
|
|
user_profile_id=item["user_profile_id"],
|
|
|
|
message_id=item["message_id"],
|
|
|
|
flags=item["flags"],
|
2018-10-12 23:42:17 +02:00
|
|
|
)
|
|
|
|
for item in items
|
|
|
|
]
|
|
|
|
bulk_insert_ums(ums)
|
|
|
|
|
|
|
|
chunk_size = 10000
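# Insert the rows in fixed-size batches so memory usage stays bounded even for
# very large imports.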
|
|
|
|
|
2018-10-15 14:24:13 +02:00
|
|
|
process_list_in_batches(
|
|
|
|
lst=lst,
|
|
|
|
chunk_size=chunk_size,
|
|
|
|
process_batch=process_batch,
|
|
|
|
)
|
2018-10-12 23:42:17 +02:00
|
|
|
|
2020-05-02 08:44:14 +02:00
|
|
|
logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)
|
2018-10-12 23:42:17 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2024-07-12 02:30:23 +02:00
|
|
|
def bulk_import_model(data: TableData, model: Any, dump_file_id: str | None = None) -> None:
|
2018-07-26 22:45:12 +02:00
|
|
|
table = get_db_table(model)
|
2018-04-23 23:28:27 +02:00
|
|
|
# TODO: deprecate dump_file_id
|
|
|
|
model.objects.bulk_create(model(**item) for item in data[table])
|
|
|
|
if dump_file_id is None:
|
2020-05-02 08:44:14 +02:00
|
|
|
logging.info("Successfully imported %s from %s.", model, table)
|
2018-04-23 23:28:27 +02:00
|
|
|
else:
|
2020-05-02 08:44:14 +02:00
|
|
|
logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2024-04-16 11:10:31 +02:00
|
|
|
def bulk_import_named_user_groups(data: TableData) -> None:
|
|
|
|
vals = [
|
|
|
|
(
|
|
|
|
group["usergroup_ptr_id"],
|
|
|
|
group["realm_for_sharding_id"],
|
2024-04-18 18:59:50 +02:00
|
|
|
group["name"],
|
|
|
|
group["description"],
|
|
|
|
group["is_system_group"],
|
2024-10-07 19:00:15 +02:00
|
|
|
group["can_add_members_group_id"],
|
2024-09-19 12:41:22 +02:00
|
|
|
group["can_join_group_id"],
|
2024-10-14 08:05:55 +02:00
|
|
|
group["can_leave_group_id"],
|
2023-08-21 12:06:41 +02:00
|
|
|
group["can_manage_group_id"],
|
2024-04-18 18:59:50 +02:00
|
|
|
group["can_mention_group_id"],
|
2024-05-15 15:16:05 +02:00
|
|
|
group["deactivated"],
|
2024-06-04 12:36:52 +02:00
|
|
|
group["date_created"],
|
2024-04-16 11:10:31 +02:00
|
|
|
)
|
|
|
|
for group in data["zerver_namedusergroup"]
|
|
|
|
]
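# NamedUserGroup is a child of UserGroup via multi-table inheritance (note the
# usergroup_ptr_id primary key), which Django's bulk_create() does not support,
# so the rows are inserted with raw SQL instead.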
|
|
|
|
|
|
|
|
query = SQL(
|
|
|
|
"""
|
2024-10-14 08:05:55 +02:00
|
|
|
INSERT INTO zerver_namedusergroup (usergroup_ptr_id, realm_id, name, description, is_system_group, can_add_members_group_id, can_join_group_id, can_leave_group_id, can_manage_group_id, can_mention_group_id, deactivated, date_created)
|
2024-04-16 11:10:31 +02:00
|
|
|
VALUES %s
|
|
|
|
"""
|
|
|
|
)
|
|
|
|
with connection.cursor() as cursor:
|
|
|
|
execute_values(cursor.cursor, query, vals)
|
|
|
|
|
|
|
|
|
2018-04-23 23:28:27 +02:00
|
|
|
# Client is a table shared by multiple realms, so in order to
|
|
|
|
# correctly import multiple realms into the same server, we need to
|
|
|
|
# check if a Client object already exists, and so we need to
|
|
|
|
# remap all Client IDs to the values in the new DB.
|
|
|
|
def bulk_import_client(data: TableData, model: Any, table: TableName) -> None:
|
|
|
|
for item in data[table]:
|
|
|
|
try:
|
2021-02-12 08:20:45 +01:00
|
|
|
client = Client.objects.get(name=item["name"])
|
2018-04-23 23:28:27 +02:00
|
|
|
except Client.DoesNotExist:
|
2021-02-12 08:20:45 +01:00
|
|
|
client = Client.objects.create(name=item["name"])
|
|
|
|
update_id_map(table="client", old_id=item["id"], new_id=client.id)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2021-02-14 00:03:40 +01:00
|
|
|
def fix_subscriptions_is_user_active_column(
|
2024-07-12 02:30:17 +02:00
|
|
|
data: TableData, user_profiles: list[UserProfile], crossrealm_user_ids: set[int]
|
2021-02-14 00:03:40 +01:00
|
|
|
) -> None:
|
|
|
|
table = get_db_table(Subscription)
|
|
|
|
user_id_to_active_status = {user.id: user.is_active for user in user_profiles}
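# Subscription.is_user_active denormalizes UserProfile.is_active; cross-realm
# system bots are always treated as active here.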
|
|
|
|
for sub in data[table]:
|
2024-03-23 06:18:29 +01:00
|
|
|
if sub["user_profile_id"] in crossrealm_user_ids:
|
|
|
|
sub["is_user_active"] = True
|
|
|
|
else:
|
|
|
|
sub["is_user_active"] = user_id_to_active_status[sub["user_profile_id"]]
|
2021-02-14 00:03:40 +01:00
|
|
|
|
|
|
|
|
2024-07-12 02:30:17 +02:00
|
|
|
def process_avatars(record: dict[str, Any]) -> None:
|
2024-06-25 21:03:49 +02:00
|
|
|
if not record["s3_path"].endswith(".original"):
|
|
|
|
return None
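# Only .original avatar files need processing here; ensure_avatar_image below
# regenerates the thumbnailed versions from the original file.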
|
|
|
|
user_profile = get_user_profile_by_id(record["user_profile_id"])
|
|
|
|
if settings.LOCAL_AVATARS_DIR is not None:
|
2024-06-13 14:57:18 +02:00
|
|
|
avatar_path = user_avatar_base_path_from_ids(
|
|
|
|
user_profile.id, user_profile.avatar_version, record["realm_id"]
|
|
|
|
)
|
2024-06-25 21:03:49 +02:00
|
|
|
medium_file_path = os.path.join(settings.LOCAL_AVATARS_DIR, avatar_path) + "-medium.png"
|
|
|
|
if os.path.exists(medium_file_path):
|
|
|
|
# We remove the image here primarily to deal with
|
|
|
|
# issues when running the import script multiple
|
|
|
|
# times in development (where one might reuse the
|
|
|
|
# same realm ID from a previous iteration).
|
|
|
|
os.remove(medium_file_path)
|
|
|
|
try:
|
|
|
|
ensure_avatar_image(user_profile=user_profile, medium=True)
|
|
|
|
if record.get("importer_should_thumbnail"):
|
|
|
|
ensure_avatar_image(user_profile=user_profile)
|
|
|
|
except BadImageError:
|
|
|
|
logging.warning(
|
|
|
|
"Could not thumbnail avatar image for user %s; ignoring",
|
|
|
|
user_profile.id,
|
|
|
|
)
|
|
|
|
# Delete the record of the avatar to avoid 404s.
|
|
|
|
do_change_avatar_fields(user_profile, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=None)
|
2019-07-28 01:08:18 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2024-07-23 21:55:01 +02:00
|
|
|
def process_emojis(
|
2024-08-06 23:13:56 +02:00
|
|
|
import_dir: str,
|
|
|
|
default_user_profile_id: int | None,
|
|
|
|
filename_to_has_original: dict[str, bool],
|
|
|
|
record: dict[str, Any],
|
2024-07-23 21:55:01 +02:00
|
|
|
) -> None:
|
2024-08-06 23:13:56 +02:00
|
|
|
# 3rd party exports may not provide .original files. In that case we want to just
|
|
|
|
# treat whatever file we have as the original.
|
|
|
|
should_use_as_original = not filename_to_has_original[record["file_name"]]
|
|
|
|
if not (record["s3_path"].endswith(".original") or should_use_as_original):
|
2024-07-23 21:55:01 +02:00
|
|
|
return
|
|
|
|
|
|
|
|
if "author_id" in record and record["author_id"] is not None:
|
|
|
|
user_profile = get_user_profile_by_id(record["author_id"])
|
|
|
|
else:
|
|
|
|
assert default_user_profile_id is not None
|
|
|
|
user_profile = get_user_profile_by_id(default_user_profile_id)
|
|
|
|
|
|
|
|
# file_name has the proper file extension without the
|
|
|
|
# .original suffix.
|
|
|
|
# application/octet-stream will be rejected by upload_emoji_image,
|
|
|
|
# but it's easier to use it here as the sensible default value
|
|
|
|
# and let upload_emoji_image figure out the exact error; or handle
|
|
|
|
# the file somehow anyway if it's ever changed to do that.
|
|
|
|
content_type = guess_type(record["file_name"])[0] or "application/octet-stream"
|
|
|
|
emoji_import_data_file_path = os.path.join(import_dir, record["path"])
|
|
|
|
with open(emoji_import_data_file_path, "rb") as f:
|
|
|
|
try:
|
|
|
|
# This will overwrite the files that got copied to the appropriate paths
|
|
|
|
# for emojis (whether in S3 or in the local uploads dir), ensuring to
|
|
|
|
# thumbnail them and generate stills for animated emojis.
|
2024-08-06 23:13:56 +02:00
|
|
|
is_animated = upload_emoji_image(f, record["file_name"], user_profile, content_type)
|
2024-07-23 21:55:01 +02:00
|
|
|
except BadImageError:
|
|
|
|
logging.warning(
|
|
|
|
"Could not thumbnail emoji image %s; ignoring",
|
|
|
|
record["s3_path"],
|
|
|
|
)
|
|
|
|
# TODO: should we delete the RealmEmoji object, or keep it with the files
|
|
|
|
# that did get copied, even though they do generate this error?
|
2024-08-06 23:13:56 +02:00
|
|
|
return
|
|
|
|
|
|
|
|
if is_animated and not record.get("deactivated", False):
|
|
|
|
# We only update the is_animated field if the emoji is not deactivated.
|
|
|
|
# That's because among deactivated emojis, (name, realm_id) may not be
|
|
|
|
# unique, making the implementation here a bit hairier.
|
|
|
|
# Anyway, for Zulip exports, is_animated should be set correctly from the start,
|
|
|
|
# while 3rd party exports don't use the deactivated field, so this shouldn't
|
|
|
|
# particularly matter.
|
|
|
|
RealmEmoji.objects.filter(
|
2024-10-09 02:13:20 +02:00
|
|
|
file_name=record["file_name"], realm_id=user_profile.realm_id, deactivated=False
|
2024-08-06 23:13:56 +02:00
|
|
|
).update(is_animated=True)
|
2024-07-23 21:55:01 +02:00
|
|
|
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
def import_uploads(
|
|
|
|
realm: Realm,
|
|
|
|
import_dir: Path,
|
|
|
|
processes: int,
|
2024-07-12 02:30:23 +02:00
|
|
|
default_user_profile_id: int | None = None,
|
2021-02-12 08:19:30 +01:00
|
|
|
processing_avatars: bool = False,
|
|
|
|
processing_emojis: bool = False,
|
|
|
|
processing_realm_icons: bool = False,
|
|
|
|
) -> None:
|
2018-12-06 01:00:29 +01:00
|
|
|
if processing_avatars and processing_emojis:
|
|
|
|
raise AssertionError("Cannot import avatars and emojis at the same time!")
|
|
|
|
if processing_avatars:
|
|
|
|
logging.info("Importing avatars")
|
|
|
|
elif processing_emojis:
|
|
|
|
logging.info("Importing emojis")
|
2019-07-19 19:15:23 +02:00
|
|
|
elif processing_realm_icons:
|
|
|
|
logging.info("Importing realm icons and logos")
|
2018-12-06 01:00:29 +01:00
|
|
|
else:
|
|
|
|
logging.info("Importing uploaded files")
|
|
|
|
|
|
|
|
records_filename = os.path.join(import_dir, "records.json")
|
2020-08-07 01:09:47 +02:00
|
|
|
with open(records_filename, "rb") as records_file:
|
2024-07-12 02:30:17 +02:00
|
|
|
records: list[dict[str, Any]] = orjson.loads(records_file.read())
|
2018-12-06 00:48:27 +01:00
|
|
|
timestamp = datetime_to_timestamp(timezone_now())
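# This timestamp is used below to stamp last_modified metadata on emoji and
# realm icon uploads.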
|
2018-12-06 00:24:42 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
re_map_foreign_keys_internal(
|
2021-02-12 08:20:45 +01:00
|
|
|
records, "records", "realm_id", related_table="realm", id_field=True
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2019-07-19 19:15:23 +02:00
|
|
|
if not processing_emojis and not processing_realm_icons:
|
2021-02-12 08:19:30 +01:00
|
|
|
re_map_foreign_keys_internal(
|
2021-02-12 08:20:45 +01:00
|
|
|
records, "records", "user_profile_id", related_table="user_profile", id_field=True
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2024-08-06 23:13:56 +02:00
|
|
|
if processing_emojis:
|
|
|
|
# We need to build a mapping telling us which emojis have an .original file.
|
|
|
|
# This will be used when thumbnailing them later, to know whether we have that
|
|
|
|
# file available or whether we should just treat the regular image as the original
|
|
|
|
# for thumbnailing.
|
|
|
|
filename_to_has_original = {record["file_name"]: False for record in records}
|
|
|
|
for record in records:
|
|
|
|
if record["s3_path"].endswith(".original"):
|
|
|
|
filename_to_has_original[record["file_name"]] = True
|
|
|
|
|
|
|
|
if records and "author" in records[0]:
|
|
|
|
# This condition only guarantees that the author field appears in the generated
|
|
|
|
# records. Its value might still be None, though. In that
|
|
|
|
# case, this will be ignored by the remap below.
|
|
|
|
# Any code further down the codepath that wants to use the author value
|
|
|
|
# needs to be mindful of it potentially being None and use a fallback
|
|
|
|
# value, most likely default_user_profile_id being the right choice.
|
|
|
|
re_map_foreign_keys_internal(
|
|
|
|
records, "records", "author", related_table="user_profile", id_field=False
|
|
|
|
)
|
2018-12-06 01:00:29 +01:00
|
|
|
|
|
|
|
s3_uploads = settings.LOCAL_UPLOADS_DIR is None
|
2018-12-06 00:38:34 +01:00
|
|
|
|
2018-12-06 01:00:29 +01:00
|
|
|
if s3_uploads:
|
2019-07-19 19:15:23 +02:00
|
|
|
if processing_avatars or processing_emojis or processing_realm_icons:
|
2018-12-06 01:00:29 +01:00
|
|
|
bucket_name = settings.S3_AVATAR_BUCKET
|
2018-12-06 00:38:34 +01:00
|
|
|
else:
|
2018-12-06 01:00:29 +01:00
|
|
|
bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
|
2020-10-26 22:10:53 +01:00
|
|
|
bucket = get_bucket(bucket_name)
|
2018-12-06 00:32:49 +01:00
|
|
|
|
2024-02-02 01:38:52 +01:00
|
|
|
for count, record in enumerate(records, 1):
|
2018-04-23 23:28:27 +02:00
|
|
|
if processing_avatars:
|
2018-12-06 00:38:34 +01:00
|
|
|
# For avatars, we need to rehash the user ID with the
|
2018-04-23 23:28:27 +02:00
|
|
|
# new server's avatar salt
|
2024-06-13 14:57:18 +02:00
|
|
|
relative_path = user_avatar_base_path_from_ids(
|
|
|
|
record["user_profile_id"], record["avatar_version"], record["realm_id"]
|
|
|
|
)
|
2021-02-12 08:20:45 +01:00
|
|
|
if record["s3_path"].endswith(".original"):
|
|
|
|
relative_path += ".original"
|
2018-12-06 00:49:57 +01:00
|
|
|
else:
|
2024-06-25 21:03:49 +02:00
|
|
|
relative_path = upload_backend.get_avatar_path(relative_path, medium=False)
|
2018-06-18 18:58:44 +02:00
|
|
|
elif processing_emojis:
|
2018-04-23 23:28:27 +02:00
|
|
|
# For emojis we follow the function 'upload_emoji_image'
|
2018-12-06 00:38:34 +01:00
|
|
|
relative_path = RealmEmoji.PATH_ID_TEMPLATE.format(
|
2021-02-12 08:20:45 +01:00
|
|
|
realm_id=record["realm_id"], emoji_file_name=record["file_name"]
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2024-07-23 21:55:01 +02:00
|
|
|
if record["s3_path"].endswith(".original"):
|
|
|
|
relative_path += ".original"
|
2021-02-12 08:20:45 +01:00
|
|
|
record["last_modified"] = timestamp
|
2019-07-19 19:15:23 +02:00
|
|
|
elif processing_realm_icons:
|
|
|
|
icon_name = os.path.basename(record["path"])
|
2021-02-12 08:20:45 +01:00
|
|
|
relative_path = os.path.join(str(record["realm_id"]), "realm", icon_name)
|
|
|
|
record["last_modified"] = timestamp
|
2018-04-23 23:28:27 +02:00
|
|
|
else:
|
2021-08-01 17:07:22 +02:00
|
|
|
# This relative_path is basically the new location of the file,
|
|
|
|
# which will later be copied from its original location as
|
|
|
|
# specified in record["s3_path"].
|
2021-09-02 14:28:21 +02:00
|
|
|
relative_path = upload_backend.generate_message_upload_path(
|
|
|
|
str(record["realm_id"]), sanitize_name(os.path.basename(record["path"]))
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
|
|
|
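# Remember both directions of the old <-> new attachment path mapping, plus where
# the file lives in the export directory; these maps are used later when Attachment
# data and upload links in messages get fixed up.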
path_maps["old_attachment_path_to_new_path"][record["s3_path"]] = relative_path
|
|
|
|
path_maps["new_attachment_path_to_old_path"][relative_path] = record["s3_path"]
|
|
|
|
path_maps["new_attachment_path_to_local_data_path"][relative_path] = os.path.join(
|
|
|
|
import_dir, record["path"]
|
|
|
|
)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2018-12-06 01:00:29 +01:00
|
|
|
if s3_uploads:
|
2018-12-07 17:52:01 +01:00
|
|
|
key = bucket.Object(relative_path)
|
|
|
|
metadata = {}
|
2022-04-28 23:36:14 +02:00
|
|
|
if "user_profile_id" not in record:
|
|
|
|
# This should never happen for uploads or avatars; if
|
|
|
|
# so, it is an error, default_user_profile_id will be
|
|
|
|
# None, and we assert. For emoji / realm icons, we
|
|
|
|
# fall back to default_user_profile_id.
|
|
|
|
assert default_user_profile_id is not None
|
|
|
|
metadata["user_profile_id"] = str(default_user_profile_id)
|
2019-07-19 19:15:23 +02:00
|
|
|
else:
|
2021-02-12 08:20:45 +01:00
|
|
|
user_profile_id = int(record["user_profile_id"])
|
2018-12-06 01:00:29 +01:00
|
|
|
# Support email gateway bot and other cross-realm messages
|
|
|
|
if user_profile_id in ID_MAP["user_profile"]:
|
2020-05-02 08:44:14 +02:00
|
|
|
logging.info("Uploaded by ID mapped user: %s!", user_profile_id)
|
2018-12-06 01:00:29 +01:00
|
|
|
user_profile_id = ID_MAP["user_profile"][user_profile_id]
|
|
|
|
user_profile = get_user_profile_by_id(user_profile_id)
|
2018-12-07 17:52:01 +01:00
|
|
|
metadata["user_profile_id"] = str(user_profile.id)
|
2018-12-06 01:00:29 +01:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
if "last_modified" in record:
|
|
|
|
metadata["orig_last_modified"] = str(record["last_modified"])
|
|
|
|
metadata["realm_id"] = str(record["realm_id"])
|
2018-12-06 01:00:29 +01:00
|
|
|
|
|
|
|
# Zulip exports will always have a content-type, but third-party exports might not.
|
|
|
|
content_type = record.get("content_type")
|
|
|
|
if content_type is None:
|
2021-02-12 08:20:45 +01:00
|
|
|
content_type = guess_type(record["s3_path"])[0]
|
2018-12-30 07:13:11 +01:00
|
|
|
if content_type is None:
|
|
|
|
# This is the default for unknown data. Note that
|
|
|
|
# for `.original` files, this is the value we'll
|
|
|
|
# set; that is OK, because those are never served
|
|
|
|
# directly anyway.
|
2021-02-12 08:20:45 +01:00
|
|
|
content_type = "application/octet-stream"
|
2018-12-06 01:00:29 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
key.upload_file(
|
2021-08-10 02:11:16 +02:00
|
|
|
Filename=os.path.join(import_dir, record["path"]),
|
2021-02-12 08:20:45 +01:00
|
|
|
ExtraArgs={"ContentType": content_type, "Metadata": metadata},
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2018-04-23 23:28:27 +02:00
|
|
|
else:
|
2021-08-18 17:54:22 +02:00
|
|
|
assert settings.LOCAL_UPLOADS_DIR is not None
|
2022-12-12 22:02:25 +01:00
|
|
|
assert settings.LOCAL_AVATARS_DIR is not None
|
|
|
|
assert settings.LOCAL_FILES_DIR is not None
|
2019-07-19 19:15:23 +02:00
|
|
|
if processing_avatars or processing_emojis or processing_realm_icons:
|
2022-12-12 22:02:25 +01:00
|
|
|
file_path = os.path.join(settings.LOCAL_AVATARS_DIR, relative_path)
|
2018-12-06 01:00:29 +01:00
|
|
|
else:
|
2022-12-12 22:02:25 +01:00
|
|
|
file_path = os.path.join(settings.LOCAL_FILES_DIR, relative_path)
|
2021-02-12 08:20:45 +01:00
|
|
|
orig_file_path = os.path.join(import_dir, record["path"])
|
2018-12-06 01:00:29 +01:00
|
|
|
os.makedirs(os.path.dirname(file_path), exist_ok=True)
|
|
|
|
shutil.copy(orig_file_path, file_path)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2024-02-02 01:38:52 +01:00
|
|
|
if count % 1000 == 0:
|
|
|
|
logging.info("Processed %s/%s uploads", count, len(records))
|
|
|
|
|
2024-07-23 21:55:01 +02:00
|
|
|
if processing_avatars or processing_emojis:
|
|
|
|
if processing_avatars:
|
|
|
|
process_func = process_avatars
|
|
|
|
else:
|
|
|
|
assert processing_emojis
|
2024-08-06 23:13:56 +02:00
|
|
|
|
2024-07-23 21:55:01 +02:00
|
|
|
process_func = partial(
|
|
|
|
process_emojis,
|
|
|
|
import_dir,
|
|
|
|
default_user_profile_id,
|
2024-08-06 23:13:56 +02:00
|
|
|
filename_to_has_original,
|
2024-07-23 21:55:01 +02:00
|
|
|
)
|
|
|
|
|
2018-12-06 00:35:16 +01:00
|
|
|
# Ensure that we have medium-size avatar images for every
|
2024-07-23 21:55:01 +02:00
|
|
|
# avatar and properly thumbnailed emojis with stills (for animated emoji).
|
|
|
|
# TODO: This implementation is hacky, both in that it
|
2018-12-06 00:35:16 +01:00
|
|
|
# does get_user_profile_by_id for each user, and in that it
|
|
|
|
# might be better to require the export to just have these.
|
2019-01-25 20:40:49 +01:00
|
|
|
if processes == 1:
|
|
|
|
for record in records:
|
2024-07-23 21:55:01 +02:00
|
|
|
process_func(record)
|
2019-01-25 20:40:49 +01:00
|
|
|
else:
|
|
|
|
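# Close the database and memcached connections before forking worker processes,
# so each child opens its own connections rather than sharing the parent's sockets.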
connection.close()
|
2022-10-08 06:10:17 +02:00
|
|
|
_cache = cache._cache # type: ignore[attr-defined] # not in stubs
|
2022-07-19 00:14:23 +02:00
|
|
|
assert isinstance(_cache, bmemcached.Client)
|
|
|
|
_cache.disconnect_all()
|
2022-07-29 09:10:34 +02:00
|
|
|
with ProcessPoolExecutor(max_workers=processes) as executor:
|
|
|
|
for future in as_completed(
|
2024-07-23 21:55:01 +02:00
|
|
|
executor.submit(process_func, record) for record in records
|
2022-07-29 09:10:34 +02:00
|
|
|
):
|
|
|
|
future.result()
|
2018-12-06 00:35:16 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2024-02-05 23:52:25 +01:00
|
|
|
def disable_restricted_authentication_methods(data: TableData) -> None:
|
|
|
|
"""
|
|
|
|
Should run only with settings.BILLING_ENABLED. Ensures that we only
|
|
|
|
enable authentication methods that are available without needing a plan.
|
|
|
|
If the organization upgrades to a paid plan, or gets a sponsorship,
|
|
|
|
they can enable the restricted authentication methods in their settings.
|
|
|
|
"""
|
|
|
|
realm_authentication_methods = data["zerver_realmauthenticationmethod"]
|
|
|
|
non_restricted_methods = []
|
|
|
|
for auth_method in realm_authentication_methods:
|
|
|
|
if AUTH_BACKEND_NAME_MAP[auth_method["name"]].available_for_cloud_plans is None:
|
|
|
|
non_restricted_methods.append(auth_method)
|
|
|
|
else:
|
|
|
|
logging.warning("Dropped restricted authentication method: %s", auth_method["name"])
|
|
|
|
|
|
|
|
data["zerver_realmauthenticationmethod"] = non_restricted_methods
|
|
|
|
|
|
|
|
|
2018-04-23 23:28:27 +02:00
|
|
|
# Importing data suffers from a difficult ordering problem because of
|
|
|
|
# models that reference each other circularly. Here is a correct order.
|
|
|
|
#
|
2023-04-29 21:07:48 +02:00
|
|
|
# (Note that this list is not exhaustive and only talks about the main,
|
|
|
|
# most important models. There's a bunch of minor models that are handled
|
|
|
|
# separately and not mentioned here - but following the principle that we
|
|
|
|
# have to import the dependencies first.)
|
|
|
|
#
|
2018-04-23 23:28:27 +02:00
|
|
|
# * Client [no deps]
|
2024-02-07 12:13:02 +01:00
|
|
|
# * Realm [-announcements_streams,-group_permissions]
|
2022-07-20 18:58:32 +02:00
|
|
|
# * UserGroup
|
2018-04-23 23:28:27 +02:00
|
|
|
# * Stream [only depends on realm]
|
2024-02-07 12:13:02 +01:00
|
|
|
# * Realm's announcements_streams and group_permissions
|
2018-04-23 23:28:27 +02:00
|
|
|
# * UserProfile, in order by ID to avoid bot loop issues
|
2023-04-29 21:07:48 +02:00
|
|
|
# * Now can do all realm_tables
|
2024-07-05 13:13:40 +02:00
|
|
|
# * DirectMessageGroup
|
2018-04-23 23:28:27 +02:00
|
|
|
# * Recipient
|
|
|
|
# * Subscription
|
|
|
|
# * Message
|
|
|
|
# * UserMessage
|
|
|
|
#
|
|
|
|
# Because the Python object => JSON conversion process is not fully
|
|
|
|
# faithful, we have to use a set of fixers (e.g. on DateTime objects
|
2021-05-10 07:02:14 +02:00
|
|
|
# and foreign keys) to do the import correctly.
|
2021-02-12 08:19:30 +01:00
|
|
|
def do_import_realm(import_dir: Path, subdomain: str, processes: int = 1) -> Realm:
|
2020-05-02 08:44:14 +02:00
|
|
|
logging.info("Importing realm dump %s", import_dir)
|
2018-04-23 23:28:27 +02:00
|
|
|
if not os.path.exists(import_dir):
|
|
|
|
raise Exception("Missing import directory!")
|
|
|
|
|
|
|
|
realm_data_filename = os.path.join(import_dir, "realm.json")
|
|
|
|
if not os.path.exists(realm_data_filename):
|
|
|
|
raise Exception("Missing realm.json file!")
|
|
|
|
|
2020-04-02 21:42:08 +02:00
|
|
|
if not server_initialized():
|
|
|
|
create_internal_realm()
|
|
|
|
|
2020-05-02 08:44:14 +02:00
|
|
|
logging.info("Importing realm data from %s", realm_data_filename)
|
2020-08-07 01:09:47 +02:00
|
|
|
with open(realm_data_filename, "rb") as f:
|
|
|
|
data = orjson.loads(f.read())
|
2023-07-07 21:53:38 +02:00
|
|
|
|
|
|
|
# Merge in zerver_userprofile_mirrordummy
|
2024-07-14 21:06:04 +02:00
|
|
|
data["zerver_userprofile"] += data["zerver_userprofile_mirrordummy"]
|
2023-07-07 21:53:38 +02:00
|
|
|
del data["zerver_userprofile_mirrordummy"]
|
|
|
|
data["zerver_userprofile"].sort(key=lambda r: r["id"])
|
|
|
|
|
2019-11-28 16:56:04 +01:00
|
|
|
remove_denormalized_recipient_column_from_data(data)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
sort_by_date = data.get("sort_by_date", False)
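# sort_by_date comes from the export metadata and is passed to
# update_message_foreign_keys below when allocating message ids.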
|
2018-10-16 12:34:47 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
bulk_import_client(data, Client, "zerver_client")
|
2018-09-21 19:58:24 +02:00
|
|
|
|
2024-04-21 03:25:34 +02:00
|
|
|
# Remap the user IDs for notification_bot and friends to their
|
|
|
|
# appropriate IDs on this server
|
|
|
|
internal_realm = get_realm(settings.SYSTEM_BOT_REALM)
|
|
|
|
crossrealm_user_ids = set()
|
|
|
|
for item in data["zerver_userprofile_crossrealm"]:
|
|
|
|
logging.info(
|
|
|
|
"Adding to ID map: %s %s",
|
|
|
|
item["id"],
|
|
|
|
get_system_bot(item["email"], internal_realm.id).id,
|
|
|
|
)
|
|
|
|
new_user_id = get_system_bot(item["email"], internal_realm.id).id
|
|
|
|
update_id_map(table="user_profile", old_id=item["id"], new_id=new_user_id)
|
|
|
|
crossrealm_user_ids.add(new_user_id)
|
|
|
|
new_recipient_id = Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user_id).id
|
|
|
|
update_id_map(table="recipient", old_id=item["recipient_id"], new_id=new_recipient_id)
|
|
|
|
|
|
|
|
# We first do a pass of updating model IDs for the cluster of
|
|
|
|
# major models that have foreign keys into each other.
|
|
|
|
# TODO: Should we just do this for all tables at the start?
|
|
|
|
update_model_ids(Realm, data, "realm")
|
2021-02-12 08:20:45 +01:00
|
|
|
update_model_ids(Stream, data, "stream")
|
2024-04-21 03:25:34 +02:00
|
|
|
update_model_ids(UserProfile, data, "user_profile")
|
|
|
|
if "zerver_usergroup" in data:
|
|
|
|
update_model_ids(UserGroup, data, "usergroup")
|
2024-05-26 02:38:57 +02:00
|
|
|
if "zerver_presencesequence" in data:
|
|
|
|
update_model_ids(PresenceSequence, data, "presencesequence")
|
2024-04-21 03:25:34 +02:00
|
|
|
|
|
|
|
# Now we prepare to import the Realm table
|
2024-02-07 12:13:02 +01:00
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_realm", "new_stream_announcements_stream", related_table="stream"
|
|
|
|
)
|
2024-02-07 17:11:43 +01:00
|
|
|
re_map_foreign_keys(data, "zerver_realm", "signup_announcements_stream", related_table="stream")
|
2024-01-26 14:45:37 +01:00
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_realm", "zulip_update_announcements_stream", related_table="stream"
|
|
|
|
)
|
2023-08-09 15:06:56 +02:00
|
|
|
if "zerver_usergroup" in data:
|
|
|
|
for setting_name in Realm.REALM_PERMISSION_GROUP_SETTINGS:
|
|
|
|
re_map_foreign_keys(data, "zerver_realm", setting_name, related_table="usergroup")
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
fix_datetime_fields(data, "zerver_realm")
|
2018-04-23 23:28:27 +02:00
|
|
|
# Fix realm subdomain information
|
2021-02-12 08:20:45 +01:00
|
|
|
data["zerver_realm"][0]["string_id"] = subdomain
|
|
|
|
data["zerver_realm"][0]["name"] = subdomain
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2023-04-03 16:05:22 +02:00
|
|
|
# Create the realm, but mark it deactivated for now, while we
|
|
|
|
# import the supporting data structures, which may take a bit.
|
|
|
|
realm_properties = dict(**data["zerver_realm"][0])
|
|
|
|
realm_properties["deactivated"] = True
|
2018-09-21 05:39:35 +02:00
|
|
|
|
2023-11-23 22:07:41 +01:00
|
|
|
# Initialize whether we expect push notifications to work.
|
|
|
|
realm_properties["push_notifications_enabled"] = sends_notifications_directly()
|
|
|
|
|
2023-06-07 14:24:12 +02:00
|
|
|
with transaction.atomic(durable=True):
|
|
|
|
realm = Realm(**realm_properties)
|
2023-08-09 15:06:56 +02:00
|
|
|
if "zerver_usergroup" not in data:
|
|
|
|
# For now, a dummy value of -1 is given to the group setting fields, which
|
|
|
|
# is changed later before the transaction is committed.
|
2023-10-31 12:28:49 +01:00
|
|
|
for permission_configuration in Realm.REALM_PERMISSION_GROUP_SETTINGS.values():
|
|
|
|
setattr(realm, permission_configuration.id_field_name, -1)
|
2023-08-09 15:06:56 +02:00
|
|
|
|
2023-06-07 14:24:12 +02:00
|
|
|
realm.save()
|
|
|
|
|
2024-05-26 02:38:57 +02:00
|
|
|
if "zerver_presencesequence" in data:
|
|
|
|
re_map_foreign_keys(data, "zerver_presencesequence", "realm", related_table="realm")
|
|
|
|
bulk_import_model(data, PresenceSequence)
|
|
|
|
else:
|
|
|
|
# We need to enforce the invariant that every realm must have a PresenceSequence.
|
|
|
|
PresenceSequence.objects.create(realm=realm, last_update_id=0)
|
|
|
|
|
2024-09-25 11:51:28 +02:00
|
|
|
named_user_group_id_to_creator_id = {}
|
2023-06-07 14:24:12 +02:00
|
|
|
if "zerver_usergroup" in data:
|
|
|
|
re_map_foreign_keys(data, "zerver_usergroup", "realm", related_table="realm")
|
|
|
|
bulk_import_model(data, UserGroup)
|
|
|
|
|
2024-04-16 11:10:31 +02:00
|
|
|
if "zerver_namedusergroup" in data:
|
2024-06-04 12:36:52 +02:00
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_namedusergroup", "creator", related_table="user_profile"
|
|
|
|
)
|
|
|
|
fix_datetime_fields(data, "zerver_namedusergroup")
|
2024-04-16 11:10:31 +02:00
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_namedusergroup", "usergroup_ptr", related_table="usergroup"
|
|
|
|
)
|
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_namedusergroup", "realm_for_sharding", related_table="realm"
|
|
|
|
)
|
2024-09-25 11:51:28 +02:00
|
|
|
for group in data["zerver_namedusergroup"]:
|
|
|
|
creator_id = group.pop("creator_id", None)
|
|
|
|
named_user_group_id_to_creator_id[group["id"]] = creator_id
|
2024-04-18 10:50:51 +02:00
|
|
|
for setting_name in NamedUserGroup.GROUP_PERMISSION_SETTINGS:
|
2024-04-16 11:10:31 +02:00
|
|
|
re_map_foreign_keys(
|
|
|
|
data,
|
|
|
|
"zerver_namedusergroup",
|
2024-04-18 18:59:50 +02:00
|
|
|
setting_name,
|
2024-04-16 11:10:31 +02:00
|
|
|
related_table="usergroup",
|
|
|
|
)
|
|
|
|
bulk_import_named_user_groups(data)
|
|
|
|
|
2023-06-07 14:24:12 +02:00
|
|
|
# We expect Zulip server exports to contain these system groups;
|
|
|
|
# this logic here is needed to handle the imports from other services.
|
2024-07-12 02:30:23 +02:00
|
|
|
role_system_groups_dict: dict[int, NamedUserGroup] | None = None
|
2023-06-07 14:24:12 +02:00
|
|
|
if "zerver_usergroup" not in data:
|
|
|
|
role_system_groups_dict = create_system_user_groups_for_realm(realm)
|
|
|
|
|
|
|
|
# Email tokens will automatically be randomly generated when the
|
|
|
|
# Stream objects are created by Django.
|
|
|
|
fix_datetime_fields(data, "zerver_stream")
|
|
|
|
re_map_foreign_keys(data, "zerver_stream", "realm", related_table="realm")
|
2024-05-14 03:40:42 +02:00
|
|
|
|
2024-03-22 19:37:19 +01:00
|
|
|
re_map_foreign_keys(data, "zerver_stream", "creator", related_table="user_profile")
|
2024-05-14 03:40:42 +02:00
|
|
|
# There's a circular dependency between Stream and UserProfile due to
|
|
|
|
# the .creator attribute. We untangle it by first remembering the creator_id
|
|
|
|
# for all the streams and then removing those fields from the data.
|
|
|
|
# That allows us to successfully import streams, and then later after users
|
|
|
|
# are imported, we can set the .creator_id for all these streams correctly.
|
|
|
|
stream_id_to_creator_id = {}
|
|
|
|
for stream in data["zerver_stream"]:
|
|
|
|
creator_id = stream.pop("creator_id", None)
|
|
|
|
stream_id_to_creator_id[stream["id"]] = creator_id
|
|
|
|
|
2023-06-07 14:24:12 +02:00
|
|
|
if role_system_groups_dict is not None:
|
2023-07-12 19:30:23 +02:00
|
|
|
# Because the system user groups are missing, we manually set up
|
|
|
|
# the defaults for can_remove_subscribers_group for all the
|
|
|
|
# streams.
|
2023-06-07 14:24:12 +02:00
|
|
|
fix_streams_can_remove_subscribers_group_column(data, realm)
|
|
|
|
else:
|
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_stream", "can_remove_subscribers_group", related_table="usergroup"
|
|
|
|
)
|
|
|
|
# Handle rendering of stream descriptions for imports from non-Zulip tools.
|
|
|
|
for stream in data["zerver_stream"]:
|
|
|
|
stream["rendered_description"] = render_stream_description(stream["description"], realm)
|
|
|
|
bulk_import_model(data, Stream)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2023-08-09 15:06:56 +02:00
|
|
|
if "zerver_usergroup" not in data:
|
|
|
|
set_default_for_realm_permission_group_settings(realm)
|
|
|
|
|
2018-04-23 23:28:27 +02:00
|
|
|
# To remap foreign key for UserProfile.last_active_message_id
|
2018-10-16 12:34:47 +02:00
|
|
|
update_message_foreign_keys(import_dir=import_dir, sort_by_date=sort_by_date)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
fix_datetime_fields(data, "zerver_userprofile")
|
|
|
|
re_map_foreign_keys(data, "zerver_userprofile", "realm", related_table="realm")
|
|
|
|
re_map_foreign_keys(data, "zerver_userprofile", "bot_owner", related_table="user_profile")
|
2021-02-12 08:19:30 +01:00
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_userprofile", "default_sending_stream", related_table="stream"
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_userprofile", "default_events_register_stream", related_table="stream"
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_userprofile", "last_active_message_id", related_table="message", id_field=True
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2021-02-12 08:20:45 +01:00
|
|
|
for user_profile_dict in data["zerver_userprofile"]:
|
|
|
|
user_profile_dict["password"] = None
|
|
|
|
user_profile_dict["api_key"] = generate_api_key()
|
2018-04-23 23:28:27 +02:00
|
|
|
# Since Zulip doesn't use these permissions, drop them
|
2021-02-12 08:20:45 +01:00
|
|
|
del user_profile_dict["user_permissions"]
|
|
|
|
del user_profile_dict["groups"]
|
2020-07-16 14:10:43 +02:00
|
|
|
# The short_name field is obsolete in Zulip, but it's
|
|
|
|
# convenient for third party exports to populate it.
|
2021-02-12 08:20:45 +01:00
|
|
|
if "short_name" in user_profile_dict:
|
|
|
|
del user_profile_dict["short_name"]
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
user_profiles = [UserProfile(**item) for item in data["zerver_userprofile"]]
|
2018-04-23 23:28:27 +02:00
|
|
|
for user_profile in user_profiles:
|
2022-08-26 23:14:12 +02:00
|
|
|
# Validate both email attributes to be defensive
|
|
|
|
# against any malformed data, where .delivery_email
|
|
|
|
# might be set correctly, but .email not.
|
|
|
|
validate_email(user_profile.delivery_email)
|
|
|
|
validate_email(user_profile.email)
|
2018-04-23 23:28:27 +02:00
|
|
|
user_profile.set_unusable_password()
|
2023-05-08 09:17:57 +02:00
|
|
|
user_profile.tos_version = UserProfile.TOS_VERSION_BEFORE_FIRST_LOGIN
|
2018-04-23 23:28:27 +02:00
|
|
|
UserProfile.objects.bulk_create(user_profiles)
|
|
|
|
|
2024-05-14 03:40:42 +02:00
|
|
|
# UserProfiles have been loaded, so now we're ready to set .creator_id
|
|
|
|
# for streams based on the mapping we saved earlier.
|
|
|
|
streams = Stream.objects.filter(id__in=stream_id_to_creator_id.keys())
|
|
|
|
for stream in streams:
|
|
|
|
stream.creator_id = stream_id_to_creator_id[stream.id]
|
|
|
|
Stream.objects.bulk_update(streams, ["creator_id"])
|
|
|
|
|
2024-09-25 11:51:28 +02:00
|
|
|
if "zerver_namedusergroup" in data:
|
|
|
|
# UserProfiles have been loaded, so now we're ready to set .creator_id
|
|
|
|
# for groups based on the mapping we saved earlier.
|
|
|
|
named_user_groups = NamedUserGroup.objects.filter(
|
|
|
|
id__in=named_user_group_id_to_creator_id.keys()
|
|
|
|
)
|
|
|
|
for group in named_user_groups:
|
|
|
|
group.creator_id = named_user_group_id_to_creator_id[group.id]
|
|
|
|
NamedUserGroup.objects.bulk_update(named_user_groups, ["creator_id"])
|
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
re_map_foreign_keys(data, "zerver_defaultstream", "stream", related_table="stream")
|
|
|
|
re_map_foreign_keys(data, "zerver_realmemoji", "author", related_table="user_profile")
|
2024-02-05 23:52:25 +01:00
|
|
|
|
|
|
|
if settings.BILLING_ENABLED:
|
|
|
|
disable_restricted_authentication_methods(data)
|
|
|
|
|
2023-02-02 04:35:24 +01:00
|
|
|
for table, model, related_table in realm_tables:
|
2021-02-12 08:20:45 +01:00
|
|
|
re_map_foreign_keys(data, table, "realm", related_table="realm")
|
2018-07-26 22:45:12 +02:00
|
|
|
update_model_ids(model, data, related_table)
|
|
|
|
bulk_import_model(data, model)
|
2018-05-26 21:18:36 +02:00
|
|
|
|
2022-02-11 18:21:38 +01:00
|
|
|
# Ensure RealmEmoji get the .author set to a reasonable default, if the value
|
|
|
|
# wasn't provided in the import data.
|
|
|
|
first_user_profile = (
|
|
|
|
UserProfile.objects.filter(realm=realm, is_active=True, role=UserProfile.ROLE_REALM_OWNER)
|
|
|
|
.order_by("id")
|
|
|
|
.first()
|
|
|
|
)
|
|
|
|
for realm_emoji in RealmEmoji.objects.filter(realm=realm):
|
|
|
|
if realm_emoji.author_id is None:
|
2022-05-31 01:34:34 +02:00
|
|
|
assert first_user_profile is not None
|
2022-02-11 18:21:38 +01:00
|
|
|
realm_emoji.author_id = first_user_profile.id
|
|
|
|
realm_emoji.save(update_fields=["author_id"])
|
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
if "zerver_huddle" in data:
|
2024-07-05 13:13:40 +02:00
|
|
|
update_model_ids(DirectMessageGroup, data, "huddle")
|
|
|
|
# We don't import DirectMessageGroup yet, since we don't have
|
|
|
|
# the data to compute direct message group hashes until we've
|
|
|
|
# imported some of the tables below.
|
2024-07-04 14:05:48 +02:00
|
|
|
# We can't get direct message group hashes without processing
|
|
|
|
# subscriptions first, during which
|
|
|
|
# get_direct_message_groups_from_subscription is called.
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
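# Recipient.type_id is a polymorphic reference: depending on Recipient.type it
# points at a Stream, a UserProfile, or a direct message group, so it is remapped
# against each of those tables in turn (recipient_field=True limits each pass to
# rows of the matching type).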
re_map_foreign_keys(
|
|
|
|
data,
|
2021-02-12 08:20:45 +01:00
|
|
|
"zerver_recipient",
|
|
|
|
"type_id",
|
2021-02-12 08:19:30 +01:00
|
|
|
related_table="stream",
|
|
|
|
recipient_field=True,
|
|
|
|
id_field=True,
|
|
|
|
)
|
|
|
|
re_map_foreign_keys(
|
|
|
|
data,
|
2021-02-12 08:20:45 +01:00
|
|
|
"zerver_recipient",
|
|
|
|
"type_id",
|
2021-02-12 08:19:30 +01:00
|
|
|
related_table="user_profile",
|
|
|
|
recipient_field=True,
|
|
|
|
id_field=True,
|
|
|
|
)
|
|
|
|
re_map_foreign_keys(
|
|
|
|
data,
|
2021-02-12 08:20:45 +01:00
|
|
|
"zerver_recipient",
|
|
|
|
"type_id",
|
2021-02-12 08:19:30 +01:00
|
|
|
related_table="huddle",
|
|
|
|
recipient_field=True,
|
|
|
|
id_field=True,
|
|
|
|
)
|
2021-02-12 08:20:45 +01:00
|
|
|
update_model_ids(Recipient, data, "recipient")
|
2018-07-26 22:45:12 +02:00
|
|
|
bulk_import_model(data, Recipient)
|
2019-11-28 16:56:04 +01:00
|
|
|
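# Now that Recipient rows exist, fill in the denormalized .recipient column on
# Stream and UserProfile that was stripped earlier by
# remove_denormalized_recipient_column_from_data.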
bulk_set_users_or_streams_recipient_fields(Stream, Stream.objects.filter(realm=realm))
|
|
|
|
bulk_set_users_or_streams_recipient_fields(UserProfile, UserProfile.objects.filter(realm=realm))
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
re_map_foreign_keys(data, "zerver_subscription", "user_profile", related_table="user_profile")
|
2024-07-04 14:05:48 +02:00
|
|
|
get_direct_message_groups_from_subscription(data, "zerver_subscription")
|
2021-02-12 08:20:45 +01:00
|
|
|
re_map_foreign_keys(data, "zerver_subscription", "recipient", related_table="recipient")
|
|
|
|
update_model_ids(Subscription, data, "subscription")
|
2024-03-23 06:18:29 +01:00
|
|
|
fix_subscriptions_is_user_active_column(data, user_profiles, crossrealm_user_ids)
|
2018-07-26 22:45:12 +02:00
|
|
|
bulk_import_model(data, Subscription)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
if "zerver_realmauditlog" in data:
|
|
|
|
fix_datetime_fields(data, "zerver_realmauditlog")
|
|
|
|
re_map_foreign_keys(data, "zerver_realmauditlog", "realm", related_table="realm")
|
2021-02-12 08:19:30 +01:00
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_realmauditlog", "modified_user", related_table="user_profile"
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_realmauditlog", "acting_user", related_table="user_profile"
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2021-02-12 08:20:45 +01:00
|
|
|
re_map_foreign_keys(data, "zerver_realmauditlog", "modified_stream", related_table="stream")
|
2022-11-21 04:48:09 +01:00
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_realmauditlog", "modified_user_group", related_table="usergroup"
|
|
|
|
)
|
2018-07-26 22:45:12 +02:00
|
|
|
update_model_ids(RealmAuditLog, data, related_table="realmauditlog")
|
|
|
|
bulk_import_model(data, RealmAuditLog)
|
2018-07-05 21:28:21 +02:00
|
|
|
else:
|
2021-02-12 08:20:45 +01:00
|
|
|
logging.info("about to call create_subscription_events")
|
2018-09-30 12:54:16 +02:00
|
|
|
create_subscription_events(
|
|
|
|
data=data,
|
|
|
|
realm_id=realm.id,
|
|
|
|
)
|
2021-02-12 08:20:45 +01:00
|
|
|
logging.info("done with create_subscription_events")
|
2018-07-05 21:28:21 +02:00
|
|
|
|
2021-04-20 12:29:19 +02:00
|
|
|
# Ensure the invariant that there's always a realm-creation audit
|
|
|
|
# log event, even if the export was generated by an export tool
|
|
|
|
# that does not create RealmAuditLog events.
|
|
|
|
if not RealmAuditLog.objects.filter(
|
2024-09-03 16:46:18 +02:00
|
|
|
realm=realm, event_type=AuditLogEventType.REALM_CREATED
|
2021-04-20 12:29:19 +02:00
|
|
|
).exists():
|
|
|
|
RealmAuditLog.objects.create(
|
|
|
|
realm=realm,
|
2024-09-03 16:46:18 +02:00
|
|
|
event_type=AuditLogEventType.REALM_CREATED,
|
2021-04-20 12:29:19 +02:00
|
|
|
event_time=realm.date_created,
|
|
|
|
# Mark these as backfilled, since they weren't created
|
2021-05-18 14:44:05 +02:00
|
|
|
# when the realm was actually created, and thus do not
|
2021-04-20 12:29:19 +02:00
|
|
|
# have the creating user associated with them.
|
|
|
|
backfilled=True,
|
|
|
|
)
|
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
if "zerver_huddle" in data:
|
2024-07-04 14:05:48 +02:00
|
|
|
process_direct_message_group_hash(data, "zerver_huddle")
|
2024-07-05 13:13:40 +02:00
|
|
|
bulk_import_model(data, DirectMessageGroup)
|
|
|
|
for direct_message_group in DirectMessageGroup.objects.filter(recipient=None):
|
2024-03-22 00:39:33 +01:00
|
|
|
recipient = Recipient.objects.get(
|
2024-07-04 14:05:48 +02:00
|
|
|
type=Recipient.DIRECT_MESSAGE_GROUP, type_id=direct_message_group.id
|
2024-03-22 00:39:33 +01:00
|
|
|
)
|
2024-07-04 14:05:48 +02:00
|
|
|
direct_message_group.recipient = recipient
|
|
|
|
direct_message_group.save(update_fields=["recipient"])
|
2018-05-25 18:54:22 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
if "zerver_alertword" in data:
|
|
|
|
re_map_foreign_keys(data, "zerver_alertword", "user_profile", related_table="user_profile")
|
|
|
|
re_map_foreign_keys(data, "zerver_alertword", "realm", related_table="realm")
|
|
|
|
update_model_ids(AlertWord, data, "alertword")
|
2020-07-16 16:11:34 +02:00
|
|
|
bulk_import_model(data, AlertWord)
|
|
|
|
|
2024-09-24 17:01:58 +02:00
|
|
|
if "zerver_savedsnippet" in data:
|
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_savedsnippet", "user_profile", related_table="user_profile"
|
|
|
|
)
|
|
|
|
re_map_foreign_keys(data, "zerver_savedsnippet", "realm", related_table="realm")
|
|
|
|
update_model_ids(SavedSnippet, data, "savedsnippet")
|
|
|
|
bulk_import_model(data, SavedSnippet)
|
|
|
|
|
2023-12-01 08:20:48 +01:00
|
|
|
if "zerver_onboardingstep" in data:
|
|
|
|
fix_datetime_fields(data, "zerver_onboardingstep")
|
|
|
|
re_map_foreign_keys(data, "zerver_onboardingstep", "user", related_table="user_profile")
|
|
|
|
update_model_ids(OnboardingStep, data, "onboardingstep")
|
|
|
|
bulk_import_model(data, OnboardingStep)
|
2018-07-12 16:34:26 +02:00
|
|
|
|
2021-07-31 07:39:59 +02:00
|
|
|
if "zerver_usertopic" in data:
|
|
|
|
fix_datetime_fields(data, "zerver_usertopic")
|
|
|
|
re_map_foreign_keys(data, "zerver_usertopic", "user_profile", related_table="user_profile")
|
|
|
|
re_map_foreign_keys(data, "zerver_usertopic", "stream", related_table="stream")
|
|
|
|
re_map_foreign_keys(data, "zerver_usertopic", "recipient", related_table="recipient")
|
|
|
|
update_model_ids(UserTopic, data, "usertopic")
|
2021-07-23 15:26:02 +02:00
|
|
|
bulk_import_model(data, UserTopic)
|
2018-07-14 16:10:45 +02:00
|
|
|
|
2021-03-27 12:23:32 +01:00
|
|
|
if "zerver_muteduser" in data:
|
|
|
|
fix_datetime_fields(data, "zerver_muteduser")
|
|
|
|
re_map_foreign_keys(data, "zerver_muteduser", "user_profile", related_table="user_profile")
|
|
|
|
re_map_foreign_keys(data, "zerver_muteduser", "muted_user", related_table="user_profile")
|
|
|
|
update_model_ids(MutedUser, data, "muteduser")
|
|
|
|
bulk_import_model(data, MutedUser)
|
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
if "zerver_service" in data:
|
|
|
|
re_map_foreign_keys(data, "zerver_service", "user_profile", related_table="user_profile")
|
|
|
|
fix_service_tokens(data, "zerver_service")
|
|
|
|
update_model_ids(Service, data, "service")
|
2018-07-26 22:45:12 +02:00
|
|
|
bulk_import_model(data, Service)
|
2018-07-14 17:18:24 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
if "zerver_usergroup" in data:
|
2021-02-12 08:19:30 +01:00
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_usergroupmembership", "user_group", related_table="usergroup"
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_usergroupmembership", "user_profile", related_table="user_profile"
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2021-02-12 08:20:45 +01:00
|
|
|
update_model_ids(UserGroupMembership, data, "usergroupmembership")
|
2018-07-26 22:45:12 +02:00
|
|
|
bulk_import_model(data, UserGroupMembership)
|
2021-09-29 02:46:57 +02:00
|
|
|
|
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_groupgroupmembership", "supergroup", related_table="usergroup"
|
|
|
|
)
|
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_groupgroupmembership", "subgroup", related_table="usergroup"
|
|
|
|
)
|
|
|
|
update_model_ids(GroupGroupMembership, data, "groupgroupmembership")
|
|
|
|
bulk_import_model(data, GroupGroupMembership)
|
2018-07-12 13:27:12 +02:00
|
|
|
|
2022-07-20 18:58:32 +02:00
|
|
|
# We expect Zulip server exports to contain UserGroupMembership objects
|
|
|
|
# for system groups; this logic here is needed to handle the imports from
|
|
|
|
# other services.
|
|
|
|
if role_system_groups_dict is not None:
|
2022-07-19 12:54:25 +02:00
|
|
|
add_users_to_system_user_groups(realm, user_profiles, role_system_groups_dict)
|
2021-10-18 16:42:20 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
if "zerver_botstoragedata" in data:
|
2021-02-12 08:19:30 +01:00
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_botstoragedata", "bot_profile", related_table="user_profile"
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2021-02-12 08:20:45 +01:00
|
|
|
update_model_ids(BotStorageData, data, "botstoragedata")
|
2018-07-26 22:45:12 +02:00
|
|
|
bulk_import_model(data, BotStorageData)
|
2018-07-17 19:11:16 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
if "zerver_botconfigdata" in data:
|
2021-02-12 08:19:30 +01:00
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_botconfigdata", "bot_profile", related_table="user_profile"
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2021-02-12 08:20:45 +01:00
|
|
|
update_model_ids(BotConfigData, data, "botconfigdata")
|
2018-07-26 22:45:12 +02:00
|
|
|
bulk_import_model(data, BotConfigData)
|
2018-07-17 19:11:16 +02:00
|
|
|
|
2021-06-01 12:55:44 +02:00
|
|
|
if "zerver_realmuserdefault" in data:
|
|
|
|
re_map_foreign_keys(data, "zerver_realmuserdefault", "realm", related_table="realm")
|
|
|
|
update_model_ids(RealmUserDefault, data, "realmuserdefault")
|
|
|
|
bulk_import_model(data, RealmUserDefault)
|
|
|
|
|
|
|
|
# Create a RealmUserDefault row with default values if one was not created
|
|
|
|
# already from the import data; this can happen when importing
|
|
|
|
# data from another product.
|
|
|
|
if not RealmUserDefault.objects.filter(realm=realm).exists():
|
|
|
|
RealmUserDefault.objects.create(realm=realm)
|
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
fix_datetime_fields(data, "zerver_userpresence")
|
|
|
|
re_map_foreign_keys(data, "zerver_userpresence", "user_profile", related_table="user_profile")
|
|
|
|
re_map_foreign_keys(data, "zerver_userpresence", "realm", related_table="realm")
|
|
|
|
update_model_ids(UserPresence, data, "user_presence")
|
2018-07-26 22:45:12 +02:00
|
|
|
bulk_import_model(data, UserPresence)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
fix_datetime_fields(data, "zerver_useractivity")
|
|
|
|
re_map_foreign_keys(data, "zerver_useractivity", "user_profile", related_table="user_profile")
|
|
|
|
re_map_foreign_keys(data, "zerver_useractivity", "client", related_table="client")
|
|
|
|
update_model_ids(UserActivity, data, "useractivity")
|
2018-07-26 22:45:12 +02:00
|
|
|
bulk_import_model(data, UserActivity)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
fix_datetime_fields(data, "zerver_useractivityinterval")
|
2021-02-12 08:19:30 +01:00
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_useractivityinterval", "user_profile", related_table="user_profile"
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2021-02-12 08:20:45 +01:00
|
|
|
update_model_ids(UserActivityInterval, data, "useractivityinterval")
|
2018-07-26 22:45:12 +02:00
|
|
|
bulk_import_model(data, UserActivityInterval)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
re_map_foreign_keys(data, "zerver_customprofilefield", "realm", related_table="realm")
|
2018-07-26 22:45:12 +02:00
|
|
|
update_model_ids(CustomProfileField, data, related_table="customprofilefield")
|
|
|
|
bulk_import_model(data, CustomProfileField)
|
2018-05-23 08:50:11 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_customprofilefieldvalue", "user_profile", related_table="user_profile"
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_customprofilefieldvalue", "field", related_table="customprofilefield"
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2018-07-16 17:15:42 +02:00
|
|
|
fix_customprofilefield(data)
|
2018-07-26 22:45:12 +02:00
|
|
|
update_model_ids(CustomProfileFieldValue, data, related_table="customprofilefieldvalue")
|
|
|
|
bulk_import_model(data, CustomProfileFieldValue)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
|
|
|
# Import uploaded files and avatars
|
2022-04-28 23:36:14 +02:00
|
|
|
import_uploads(
|
|
|
|
realm,
|
|
|
|
os.path.join(import_dir, "avatars"),
|
|
|
|
processes,
|
|
|
|
default_user_profile_id=None, # Fail if there is no user set
|
|
|
|
processing_avatars=True,
|
|
|
|
)
|
|
|
|
import_uploads(
|
|
|
|
realm,
|
|
|
|
os.path.join(import_dir, "uploads"),
|
|
|
|
processes,
|
|
|
|
default_user_profile_id=None, # Fail if there is no user set
|
|
|
|
)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2024-07-21 21:48:59 +02:00
|
|
|
# We need to have this check as the emoji files may not
|
|
|
|
# be present in import data from other services.
|
2018-04-23 23:28:27 +02:00
|
|
|
if os.path.exists(os.path.join(import_dir, "emoji")):
|
2022-04-28 23:36:14 +02:00
|
|
|
import_uploads(
|
|
|
|
realm,
|
|
|
|
os.path.join(import_dir, "emoji"),
|
|
|
|
processes,
|
|
|
|
default_user_profile_id=first_user_profile.id if first_user_profile else None,
|
|
|
|
processing_emojis=True,
|
|
|
|
)
|
2019-07-19 19:15:23 +02:00
|
|
|
|
|
|
|
if os.path.exists(os.path.join(import_dir, "realm_icons")):
|
2021-02-12 08:19:30 +01:00
|
|
|
import_uploads(
|
2022-04-28 23:36:14 +02:00
|
|
|
realm,
|
|
|
|
os.path.join(import_dir, "realm_icons"),
|
|
|
|
processes,
|
|
|
|
default_user_profile_id=first_user_profile.id if first_user_profile else None,
|
|
|
|
processing_realm_icons=True,
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
sender_map = {user["id"]: user for user in data["zerver_userprofile"]}
|
2018-11-02 14:05:52 +01:00
|
|
|
|
2024-05-15 12:07:08 +02:00
|
|
|
# TODO: de-dup how we read these json files.
|
|
|
|
attachments_file = os.path.join(import_dir, "attachment.json")
|
|
|
|
if not os.path.exists(attachments_file):
|
|
|
|
raise Exception("Missing attachment.json file!")
|
|
|
|
|
|
|
|
# Important: map_messages_to_attachments should be called before fix_upload_links
|
|
|
|
# which is called by import_message_data and, separately, below for zerver_scheduledmessage.
|
|
|
|
with open(attachments_file, "rb") as f:
|
|
|
|
attachment_data = orjson.loads(f.read())
|
|
|
|
|
|
|
|
# We need to import ImageAttachments before messages, as the message rendering logic
|
|
|
|
# checks for existence of ImageAttachment records to determine if HTML content for image
|
|
|
|
# preview needs to be added to a message.
|
|
|
|
# In order for ImageAttachments to be correctly created, we need to know the new path_ids
|
|
|
|
# and content_types of the attachments.
|
|
|
|
#
|
|
|
|
# Begin by fixing up the Attachment data.
|
|
|
|
fix_attachments_data(attachment_data)
|
|
|
|
# Now we're ready to create ImageAttachment rows and enqueue thumbnailing
|
|
|
|
# for the images.
|
|
|
|
# This order ensures that during message import, rendered_content will be generated
|
|
|
|
# correctly with image previews.
|
|
|
|
# The important detail here is that we **only** care about having ImageAttachment
|
|
|
|
# rows ready at the time of message import. Thumbnailing happens in a queue worker
|
|
|
|
# in a different process, and we don't care about when it'll complete.
|
|
|
|
create_image_attachments_and_maybe_enqueue_thumbnailing(realm, attachment_data)
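# A rough sketch of the intended ordering here (illustrative only; the calls
# above and below are the actual implementation):
#   1. fix_attachments_data(attachment_data)   - remap path_ids, fill in content_types
#   2. create_image_attachments_and_maybe_enqueue_thumbnailing(realm, attachment_data)
#   3. import_message_data(...)                - rendering sees the ImageAttachment rows
#   4. import_attachments(attachment_data)     - Attachment rows and m2m tables, last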
|
2024-05-15 12:07:08 +02:00
|
|
|
map_messages_to_attachments(attachment_data)
|
|
|
|
|
2024-01-24 21:18:08 +01:00
|
|
|
# Import zerver_message and zerver_usermessage
|
|
|
|
import_message_data(realm=realm, sender_map=sender_map, import_dir=import_dir)
|
|
|
|
|
2024-07-16 11:03:41 +02:00
|
|
|
if "zerver_onboardingusermessage" in data:
|
|
|
|
fix_bitfield_keys(data, "zerver_onboardingusermessage", "flags")
|
|
|
|
re_map_foreign_keys(data, "zerver_onboardingusermessage", "realm", related_table="realm")
|
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_onboardingusermessage", "message", related_table="message"
|
|
|
|
)
|
|
|
|
update_model_ids(OnboardingUserMessage, data, "onboardingusermessage")
|
|
|
|
bulk_import_model(data, OnboardingUserMessage)
|
|
|
|
|
2023-04-29 20:45:22 +02:00
|
|
|
if "zerver_scheduledmessage" in data:
|
|
|
|
fix_datetime_fields(data, "zerver_scheduledmessage")
|
|
|
|
re_map_foreign_keys(data, "zerver_scheduledmessage", "sender", related_table="user_profile")
|
|
|
|
re_map_foreign_keys(data, "zerver_scheduledmessage", "recipient", related_table="recipient")
|
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_scheduledmessage", "sending_client", related_table="client"
|
|
|
|
)
|
|
|
|
re_map_foreign_keys(data, "zerver_scheduledmessage", "stream", related_table="stream")
|
|
|
|
re_map_foreign_keys(data, "zerver_scheduledmessage", "realm", related_table="realm")
|
2024-01-24 21:18:08 +01:00
|
|
|
re_map_foreign_keys(
|
|
|
|
data, "zerver_scheduledmessage", "delivered_message", related_table="message"
|
|
|
|
)
|
2023-04-29 20:45:22 +02:00
|
|
|
|
|
|
|
fix_upload_links(data, "zerver_scheduledmessage")
|
|
|
|
|
|
|
|
fix_message_rendered_content(
|
|
|
|
realm=realm,
|
|
|
|
sender_map=sender_map,
|
|
|
|
messages=data["zerver_scheduledmessage"],
|
|
|
|
)
|
|
|
|
|
|
|
|
update_model_ids(ScheduledMessage, data, "scheduledmessage")
|
|
|
|
bulk_import_model(data, ScheduledMessage)
|
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
re_map_foreign_keys(data, "zerver_reaction", "message", related_table="message")
|
|
|
|
re_map_foreign_keys(data, "zerver_reaction", "user_profile", related_table="user_profile")
|
2021-12-05 13:01:21 +01:00
|
|
|
re_map_realm_emoji_codes(data, table_name="zerver_reaction")
|
2021-02-12 08:20:45 +01:00
|
|
|
update_model_ids(Reaction, data, "reaction")
|
2018-07-26 22:45:12 +02:00
|
|
|
bulk_import_model(data, Reaction)
|
2018-05-24 13:56:15 +02:00
|
|
|
|
2019-03-04 17:50:49 +01:00
|
|
|
# Similarly, we need to recalculate the first_message_id for stream objects.
|
2022-11-03 20:13:43 +01:00
|
|
|
update_first_message_id_query = SQL(
|
|
|
|
"""
|
2022-09-28 17:58:23 +02:00
|
|
|
UPDATE zerver_stream
|
|
|
|
SET first_message_id = subquery.first_message_id
|
|
|
|
FROM (
|
|
|
|
SELECT r.type_id id, min(m.id) first_message_id
|
|
|
|
FROM zerver_message m
|
|
|
|
JOIN zerver_recipient r ON
|
|
|
|
r.id = m.recipient_id
|
2022-11-03 20:13:43 +01:00
|
|
|
WHERE r.type = 2 AND m.realm_id = %(realm_id)s
|
2022-09-28 17:58:23 +02:00
|
|
|
GROUP BY r.type_id
|
|
|
|
) AS subquery
|
|
|
|
WHERE zerver_stream.id = subquery.id
|
|
|
|
"""
|
2022-11-03 20:13:43 +01:00
|
|
|
)
|
|
|
|
|
2022-09-28 17:58:23 +02:00
|
|
|
with connection.cursor() as cursor:
|
2022-11-03 20:13:43 +01:00
|
|
|
cursor.execute(update_first_message_id_query, {"realm_id": realm.id})
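# Illustrative effect of the query above, with hypothetical ids: if recipient
# row 42 has type=2 (stream) and type_id=7, and the earliest imported message
# addressed to recipient 42 has id 9001, then the zerver_stream row with id 7
# gets first_message_id = 9001. Streams with no imported messages are left as-is.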
|
2019-03-04 17:50:49 +01:00
|
|
|
|
2021-12-05 13:42:04 +01:00
|
|
|
if "zerver_userstatus" in data:
|
|
|
|
fix_datetime_fields(data, "zerver_userstatus")
|
|
|
|
re_map_foreign_keys(data, "zerver_userstatus", "user_profile", related_table="user_profile")
|
|
|
|
re_map_foreign_keys(data, "zerver_userstatus", "client", related_table="client")
|
|
|
|
update_model_ids(UserStatus, data, "userstatus")
|
|
|
|
re_map_realm_emoji_codes(data, table_name="zerver_userstatus")
|
|
|
|
bulk_import_model(data, UserStatus)
|
|
|
|
|
2018-04-23 23:28:27 +02:00
|
|
|
# Do attachments AFTER message data is loaded.
|
2024-05-15 12:07:08 +02:00
|
|
|
logging.info("Importing attachment data from %s", attachments_file)
|
2023-04-03 16:05:22 +02:00
|
|
|
import_attachments(attachment_data)
|
2018-12-13 08:19:29 +01:00
|
|
|
|
2019-01-30 08:54:29 +01:00
|
|
|
# Import the analytics file.
|
2024-03-23 06:18:29 +01:00
|
|
|
import_analytics_data(
|
|
|
|
realm=realm, import_dir=import_dir, crossrealm_user_ids=crossrealm_user_ids
|
|
|
|
)
|
2019-01-30 08:54:29 +01:00
|
|
|
|
2018-12-13 08:19:29 +01:00
|
|
|
if settings.BILLING_ENABLED:
|
2021-12-01 02:10:40 +01:00
|
|
|
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_LIMITED, acting_user=None)
|
2019-02-13 01:01:02 +01:00
|
|
|
else:
|
2021-12-01 02:10:40 +01:00
|
|
|
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_SELF_HOSTED, acting_user=None)
|
2023-04-03 16:05:22 +02:00
|
|
|
|
|
|
|
# Activate the realm
|
|
|
|
realm.deactivated = data["zerver_realm"][0]["deactivated"]
|
|
|
|
realm.save()
|
|
|
|
|
2023-12-11 23:26:38 +01:00
|
|
|
# This helps the billing system have accurate user count data if someone
|
|
|
|
# signs up just after the import completes.
|
|
|
|
RealmAuditLog.objects.create(
|
|
|
|
realm=realm,
|
2024-09-03 16:46:18 +02:00
|
|
|
event_type=AuditLogEventType.REALM_IMPORTED,
|
2023-12-11 23:26:38 +01:00
|
|
|
event_time=timezone_now(),
|
|
|
|
extra_data={
|
|
|
|
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
|
|
|
|
},
|
|
|
|
)
|
|
|
|
|
2023-12-04 08:05:13 +01:00
|
|
|
# Ask the push notifications service if this realm can send
|
|
|
|
# notifications, if we're using it. Needs to happen after the
|
|
|
|
# Realm object is reactivated.
|
2023-12-11 04:59:00 +01:00
|
|
|
maybe_enqueue_audit_log_upload(realm)
|
2023-12-04 08:05:13 +01:00
|
|
|
|
2024-04-17 13:53:32 +02:00
|
|
|
# If the export was NOT generated by another Zulip server, the
|
|
|
|
# 'zulip_update_announcements_level' is set to None by default.
|
|
|
|
# Set it to the latest level to avoid receiving older update messages.
|
|
|
|
is_realm_imported_from_other_zulip_server = RealmAuditLog.objects.filter(
|
2024-10-09 02:10:36 +02:00
|
|
|
realm=realm, event_type=AuditLogEventType.REALM_EXPORTED
|
2024-04-17 13:53:32 +02:00
|
|
|
).exists()
|
|
|
|
if not is_realm_imported_from_other_zulip_server:
|
2024-05-24 17:19:24 +02:00
|
|
|
send_zulip_update_announcements_to_realm(
|
|
|
|
realm, skip_delay=False, realm_imported_from_other_product=True
|
|
|
|
)
|
2024-04-17 13:53:32 +02:00
|
|
|
|
2018-04-23 23:28:27 +02:00
|
|
|
return realm
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
|
|
|
def update_message_foreign_keys(import_dir: Path, sort_by_date: bool) -> None:
|
2018-10-16 12:34:47 +02:00
|
|
|
old_id_list = get_incoming_message_ids(
|
|
|
|
import_dir=import_dir,
|
|
|
|
sort_by_date=sort_by_date,
|
|
|
|
)
|
|
|
|
|
|
|
|
count = len(old_id_list)
|
|
|
|
|
|
|
|
new_id_list = allocate_ids(model_class=Message, count=count)
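# Illustrative remapping with hypothetical ids: if the export contains message
# ids [1, 2, 3] and allocate_ids() hands back [5001, 5002, 5003], the loop
# below records {1: 5001, 2: 5002, 3: 5003} in ID_MAP["message"].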
|
|
|
|
|
2024-07-12 02:30:30 +02:00
|
|
|
for old_id, new_id in zip(old_id_list, new_id_list, strict=False):
|
2018-10-16 12:34:47 +02:00
|
|
|
update_id_map(
|
2021-02-12 08:20:45 +01:00
|
|
|
table="message",
|
2018-10-16 12:34:47 +02:00
|
|
|
old_id=old_id,
|
|
|
|
new_id=new_id,
|
|
|
|
)
|
|
|
|
|
|
|
|
# We don't touch user_message keys here; that happens later when
|
|
|
|
# we actually read the files a second time to get the real data.
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2024-07-12 02:30:17 +02:00
|
|
|
def get_incoming_message_ids(import_dir: Path, sort_by_date: bool) -> list[int]:
|
2021-02-12 08:19:30 +01:00
|
|
|
"""
|
2018-10-16 12:34:47 +02:00
|
|
|
This function reads in our entire collection of message
|
|
|
|
ids, which can be millions of integers for some installations.
|
|
|
|
And then we sort the list. This is necessary to ensure
|
|
|
|
that the sort order of incoming ids matches the sort order
|
2019-08-28 02:43:19 +02:00
|
|
|
of date_sent, which isn't always guaranteed by our
|
2018-10-16 12:34:47 +02:00
|
|
|
utilities that convert third party chat data. We also
|
|
|
|
need to move our ids to a new range if we're dealing
|
|
|
|
with a server that has data for other realms.
|
2021-02-12 08:19:30 +01:00
|
|
|
"""
|
2018-10-16 12:34:47 +02:00
|
|
|
|
|
|
|
if sort_by_date:
|
2024-07-12 02:30:17 +02:00
|
|
|
tups: list[tuple[int, int]] = []
|
2018-10-16 12:34:47 +02:00
|
|
|
else:
|
2024-07-12 02:30:17 +02:00
|
|
|
message_ids: list[int] = []
|
2018-10-16 12:34:47 +02:00
|
|
|
|
2018-04-23 23:28:27 +02:00
|
|
|
dump_file_id = 1
|
|
|
|
while True:
|
2020-06-13 08:59:37 +02:00
|
|
|
message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
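# e.g. messages-000001.json, messages-000002.json, ... until a file is missing.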
|
2018-04-23 23:28:27 +02:00
|
|
|
if not os.path.exists(message_filename):
|
|
|
|
break
|
|
|
|
|
2020-08-07 01:09:47 +02:00
|
|
|
with open(message_filename, "rb") as f:
|
|
|
|
data = orjson.loads(f.read())
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2018-10-16 12:34:47 +02:00
|
|
|
# Aggressively free up memory.
|
2021-02-12 08:20:45 +01:00
|
|
|
del data["zerver_usermessage"]
|
2018-10-16 12:34:47 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
for row in data["zerver_message"]:
|
2019-08-28 02:43:19 +02:00
|
|
|
# We truncate date_sent to int to theoretically
|
2018-10-16 12:34:47 +02:00
|
|
|
# save memory and speed up the sort. For
|
|
|
|
# Zulip-to-Zulip imports, the
|
|
|
|
# message_id will generally be a good tiebreaker.
|
2023-10-09 20:54:10 +02:00
|
|
|
# If we occasionally misorder the ids for two
|
2018-10-16 12:34:47 +02:00
|
|
|
# messages from the same second, it's not the
|
|
|
|
# end of the world, as it's likely those messages
|
|
|
|
# arrived at the original server in somewhat
|
|
|
|
# arbitrary order.
|
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
message_id = row["id"]
|
2018-10-16 12:34:47 +02:00
|
|
|
|
|
|
|
if sort_by_date:
|
2021-02-12 08:20:45 +01:00
|
|
|
date_sent = int(row["date_sent"])
|
2019-08-28 02:43:19 +02:00
|
|
|
tup = (date_sent, message_id)
|
2018-10-16 12:34:47 +02:00
|
|
|
tups.append(tup)
|
|
|
|
else:
|
|
|
|
message_ids.append(message_id)
|
|
|
|
|
2018-04-23 23:28:27 +02:00
|
|
|
dump_file_id += 1
|
|
|
|
|
2018-10-16 12:34:47 +02:00
|
|
|
if sort_by_date:
|
|
|
|
tups.sort()
|
|
|
|
message_ids = [tup[1] for tup in tups]
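# Illustrative sort with hypothetical values: tups = [(1700000100, 12),
# (1700000000, 15)] sorts to [(1700000000, 15), (1700000100, 12)], so
# message_ids = [15, 12] -- ordered by date_sent, with the original message id
# as the tiebreaker within the same second.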
|
|
|
|
|
|
|
|
return message_ids
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2024-07-12 02:30:17 +02:00
|
|
|
def import_message_data(realm: Realm, sender_map: dict[int, Record], import_dir: Path) -> None:
|
2018-04-23 23:28:27 +02:00
|
|
|
dump_file_id = 1
|
|
|
|
while True:
|
2020-06-13 08:59:37 +02:00
|
|
|
message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
|
2018-04-23 23:28:27 +02:00
|
|
|
if not os.path.exists(message_filename):
|
|
|
|
break
|
|
|
|
|
2020-08-07 01:09:47 +02:00
|
|
|
with open(message_filename, "rb") as f:
|
|
|
|
data = orjson.loads(f.read())
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2020-05-02 08:44:14 +02:00
|
|
|
logging.info("Importing message dump %s", message_filename)
|
2021-02-12 08:20:45 +01:00
|
|
|
re_map_foreign_keys(data, "zerver_message", "sender", related_table="user_profile")
|
|
|
|
re_map_foreign_keys(data, "zerver_message", "recipient", related_table="recipient")
|
|
|
|
re_map_foreign_keys(data, "zerver_message", "sending_client", related_table="client")
|
|
|
|
fix_datetime_fields(data, "zerver_message")
|
2020-10-23 02:43:28 +02:00
|
|
|
# Update message content with the remapped attachment URLs.
|
2021-02-12 08:20:45 +01:00
|
|
|
fix_upload_links(data, "zerver_message")
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2018-10-16 12:34:47 +02:00
|
|
|
# We already create mappings for zerver_message ids
|
|
|
|
# in update_message_foreign_keys(), so here we simply
|
|
|
|
# apply them.
|
2021-02-12 08:20:45 +01:00
|
|
|
message_id_map = ID_MAP["message"]
|
|
|
|
for row in data["zerver_message"]:
|
2022-09-27 21:42:31 +02:00
|
|
|
del row["realm"]
|
|
|
|
row["realm_id"] = realm.id
|
2021-02-12 08:20:45 +01:00
|
|
|
row["id"] = message_id_map[row["id"]]
|
2018-10-16 12:34:47 +02:00
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
for row in data["zerver_usermessage"]:
|
|
|
|
assert row["message"] in message_id_map
|
2018-10-16 12:34:47 +02:00
|
|
|
|
2018-11-02 14:05:52 +01:00
|
|
|
fix_message_rendered_content(
|
|
|
|
realm=realm,
|
|
|
|
sender_map=sender_map,
|
2021-02-12 08:20:45 +01:00
|
|
|
messages=data["zerver_message"],
|
2018-11-02 14:05:52 +01:00
|
|
|
)
|
2020-08-11 01:47:49 +02:00
|
|
|
logging.info("Successfully rendered Markdown for message batch")
|
2018-08-09 17:50:43 +02:00
|
|
|
|
2024-05-30 02:57:56 +02:00
|
|
|
fix_message_edit_history(
|
|
|
|
realm=realm, sender_map=sender_map, messages=data["zerver_message"]
|
|
|
|
)
|
2018-11-02 15:18:29 +01:00
|
|
|
# A LOT HAPPENS HERE.
|
|
|
|
# This is where we actually import the message data.
|
|
|
|
bulk_import_model(data, Message)
|
|
|
|
|
2018-04-23 23:28:27 +02:00
|
|
|
# Due to the structure of these message chunks, we're
|
|
|
|
# guaranteed to have already imported all the Message objects
|
|
|
|
# for this batch of UserMessage objects.
|
2021-02-12 08:20:45 +01:00
|
|
|
re_map_foreign_keys(data, "zerver_usermessage", "message", related_table="message")
|
2021-02-12 08:19:30 +01:00
|
|
|
re_map_foreign_keys(
|
2021-02-12 08:20:45 +01:00
|
|
|
data, "zerver_usermessage", "user_profile", related_table="user_profile"
|
2021-02-12 08:19:30 +01:00
|
|
|
)
|
2021-02-12 08:20:45 +01:00
|
|
|
fix_bitfield_keys(data, "zerver_usermessage", "flags")
|
2018-10-12 23:42:17 +02:00
|
|
|
|
|
|
|
bulk_import_user_message_data(data, dump_file_id)
|
2018-04-23 23:28:27 +02:00
|
|
|
dump_file_id += 1
|
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2018-04-23 23:28:27 +02:00
|
|
|
def import_attachments(data: TableData) -> None:
|
|
|
|
# Clean up the data in zerver_attachment that is not
|
|
|
|
# relevant to our many-to-many import.
|
2021-02-12 08:20:45 +01:00
|
|
|
fix_datetime_fields(data, "zerver_attachment")
|
|
|
|
re_map_foreign_keys(data, "zerver_attachment", "owner", related_table="user_profile")
|
|
|
|
re_map_foreign_keys(data, "zerver_attachment", "realm", related_table="realm")
|
2018-04-23 23:28:27 +02:00
|
|
|
|
|
|
|
# Configure ourselves. Django models many-to-many (m2m)
|
|
|
|
# relations asymmetrically. The parent here refers to the
|
|
|
|
# Model that has the ManyToManyField. It is assumed here that
|
|
|
|
# the child models have been loaded, but we are in turn
|
|
|
|
# responsible for loading the parents and the m2m rows.
|
|
|
|
parent_model = Attachment
|
2021-02-12 08:20:45 +01:00
|
|
|
parent_db_table_name = "zerver_attachment"
|
|
|
|
parent_singular = "attachment"
|
|
|
|
parent_id = "attachment_id"
|
|
|
|
|
|
|
|
update_model_ids(parent_model, data, "attachment")
|
2018-07-27 01:13:14 +02:00
|
|
|
# We don't bulk_import_model yet, because we need to first compute
|
|
|
|
# the many-to-many rows for this table.
|
|
|
|
|
2018-04-23 23:28:27 +02:00
|
|
|
# First, build our list of many-to-many (m2m) rows.
|
|
|
|
# We do this in a slightly convoluted way to anticipate
|
|
|
|
# a future where we may need to call re_map_foreign_keys.
|
|
|
|
|
2023-04-29 20:45:22 +02:00
|
|
|
def format_m2m_data(
|
|
|
|
child_singular: str, child_plural: str, m2m_table_name: str, child_id: str
|
2024-07-12 02:30:17 +02:00
|
|
|
) -> tuple[str, list[Record], str]:
|
2023-07-31 22:52:35 +02:00
|
|
|
m2m_rows = [
|
|
|
|
{
|
|
|
|
parent_singular: parent_row["id"],
|
2023-04-29 20:45:22 +02:00
|
|
|
# child_singular will generally match the model name (e.g. Message, ScheduledMessage)
|
|
|
|
# after lowercasing, and that's what we enter as ID_MAP keys, so this should be
|
|
|
|
# a reasonable assumption to make.
|
2023-07-31 22:52:35 +02:00
|
|
|
child_singular: ID_MAP[child_singular][fk_id],
|
|
|
|
}
|
|
|
|
for parent_row in data[parent_db_table_name]
|
|
|
|
for fk_id in parent_row[child_plural]
|
|
|
|
]
|
2023-04-29 20:45:22 +02:00
|
|
|
|
|
|
|
# Create our table data for insert.
|
|
|
|
m2m_data: TableData = {m2m_table_name: m2m_rows}
|
|
|
|
convert_to_id_fields(m2m_data, m2m_table_name, parent_singular)
|
|
|
|
convert_to_id_fields(m2m_data, m2m_table_name, child_singular)
|
|
|
|
m2m_rows = m2m_data[m2m_table_name]
|
|
|
|
|
|
|
|
# Next, delete the child data from the parent rows.
|
|
|
|
for parent_row in data[parent_db_table_name]:
|
|
|
|
del parent_row[child_plural]
|
|
|
|
|
|
|
|
return m2m_table_name, m2m_rows, child_id
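# Illustrative return value with hypothetical ids: for
# ("message", "messages", "zerver_attachment_messages", "message_id"), this
# returns ("zerver_attachment_messages",
#          [{"attachment_id": 10, "message_id": 9001}, ...], "message_id"),
# i.e. one row per (attachment, message) pair with both ids already remapped.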
|
|
|
|
|
|
|
|
messages_m2m_tuple = format_m2m_data(
|
|
|
|
"message", "messages", "zerver_attachment_messages", "message_id"
|
|
|
|
)
|
|
|
|
scheduled_messages_m2m_tuple = format_m2m_data(
|
|
|
|
"scheduledmessage",
|
|
|
|
"scheduled_messages",
|
|
|
|
"zerver_attachment_scheduled_messages",
|
|
|
|
"scheduledmessage_id",
|
|
|
|
)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
|
|
|
# Next, load the parent rows.
|
2018-07-26 22:45:12 +02:00
|
|
|
bulk_import_model(data, parent_model)
|
2018-04-23 23:28:27 +02:00
|
|
|
|
|
|
|
# Now, go back to our m2m rows.
|
|
|
|
# TODO: Do this the kosher Django way. We may find a
|
|
|
|
# better way to do this in Django 1.9 particularly.
|
|
|
|
with connection.cursor() as cursor:
|
2023-04-29 20:45:22 +02:00
|
|
|
for m2m_table_name, m2m_rows, child_id in [
|
|
|
|
messages_m2m_tuple,
|
|
|
|
scheduled_messages_m2m_tuple,
|
|
|
|
]:
|
|
|
|
sql_template = SQL(
|
|
|
|
"""
|
|
|
|
INSERT INTO {m2m_table_name} ({parent_id}, {child_id}) VALUES %s
|
2021-02-12 08:20:45 +01:00
|
|
|
"""
|
2023-04-29 20:45:22 +02:00
|
|
|
).format(
|
|
|
|
m2m_table_name=Identifier(m2m_table_name),
|
|
|
|
parent_id=Identifier(parent_id),
|
|
|
|
child_id=Identifier(child_id),
|
|
|
|
)
|
|
|
|
tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
|
|
|
|
execute_values(cursor.cursor, sql_template, tups)
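# execute_values() expands the single "VALUES %s" placeholder into a multi-row
# insert, roughly (hypothetical ids):
#   INSERT INTO "zerver_attachment_messages" ("attachment_id", "message_id")
#   VALUES (10, 9001), (10, 9002), (11, 9003)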
|
2018-04-23 23:28:27 +02:00
|
|
|
|
2023-04-29 20:45:22 +02:00
|
|
|
logging.info("Successfully imported M2M table %s", m2m_table_name)
|
2019-01-30 08:54:29 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2024-10-17 21:20:49 +02:00
|
|
|
def fix_attachments_data(attachment_data: TableData) -> None:
|
|
|
|
for attachment in attachment_data["zerver_attachment"]:
|
|
|
|
attachment["path_id"] = path_maps["old_attachment_path_to_new_path"][attachment["path_id"]]
|
|
|
|
|
|
|
|
# In the case of images, content_type needs to be set for thumbnailing.
|
|
|
|
# Zulip exports set this, but third-party exports may not.
|
|
|
|
if attachment.get("content_type") is None:
|
|
|
|
guessed_content_type = guess_type(attachment["path_id"])[0]
|
|
|
|
if guessed_content_type in THUMBNAIL_ACCEPT_IMAGE_TYPES:
|
|
|
|
attachment["content_type"] = guessed_content_type
|
|
|
|
|
|
|
|
|
|
|
|
def create_image_attachments_and_maybe_enqueue_thumbnailing(
|
|
|
|
realm: Realm, attachment_data: TableData
|
|
|
|
) -> None:
|
|
|
|
for attachment in attachment_data["zerver_attachment"]:
|
|
|
|
if attachment["content_type"] not in THUMBNAIL_ACCEPT_IMAGE_TYPES:
|
|
|
|
continue
|
|
|
|
|
|
|
|
path_id = attachment["path_id"]
|
|
|
|
content_type = attachment["content_type"]
|
|
|
|
|
|
|
|
# We don't have to go to S3 to obtain the file. We still have the export
|
|
|
|
# data on disk and have stored the absolute path to it.
|
|
|
|
local_filename = path_maps["new_attachment_path_to_local_data_path"][path_id]
|
|
|
|
pyvips_source = pyvips.Source.new_from_file(local_filename)
|
|
|
|
maybe_thumbnail(pyvips_source, content_type, path_id, realm.id)
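# pyvips.Source reads from the local file on demand, so this step should stay
# cheap even for large images; the actual thumbnail rendering happens in the
# queue worker (or on demand in serve_file, if the worker is backlogged).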
|
|
|
|
|
|
|
|
|
|
|
|
|
2024-07-12 02:30:17 +02:00
|
|
|
def import_analytics_data(realm: Realm, import_dir: Path, crossrealm_user_ids: set[int]) -> None:
|
2019-01-30 08:54:29 +01:00
|
|
|
analytics_filename = os.path.join(import_dir, "analytics.json")
|
|
|
|
if not os.path.exists(analytics_filename):
|
|
|
|
return
|
|
|
|
|
2020-05-02 08:44:14 +02:00
|
|
|
logging.info("Importing analytics data from %s", analytics_filename)
|
2020-08-07 01:09:47 +02:00
|
|
|
with open(analytics_filename, "rb") as f:
|
|
|
|
data = orjson.loads(f.read())
|
2019-01-30 08:54:29 +01:00
|
|
|
|
|
|
|
# Process the data through the fixer functions.
|
2021-02-12 08:20:45 +01:00
|
|
|
fix_datetime_fields(data, "analytics_realmcount")
|
|
|
|
re_map_foreign_keys(data, "analytics_realmcount", "realm", related_table="realm")
|
|
|
|
update_model_ids(RealmCount, data, "analytics_realmcount")
|
2019-01-30 08:54:29 +01:00
|
|
|
bulk_import_model(data, RealmCount)
|
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
fix_datetime_fields(data, "analytics_usercount")
|
|
|
|
re_map_foreign_keys(data, "analytics_usercount", "realm", related_table="realm")
|
|
|
|
re_map_foreign_keys(data, "analytics_usercount", "user", related_table="user_profile")
|
2024-03-23 06:18:29 +01:00
|
|
|
data["analytics_usercount"] = [
|
|
|
|
row for row in data["analytics_usercount"] if row["user_id"] not in crossrealm_user_ids
|
|
|
|
]
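# The filter above drops rows for cross-realm bots; those UserProfiles aren't
# part of this import (they already exist on the server), so there is no
# remapped user id to attach their counts to.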
|
2021-02-12 08:20:45 +01:00
|
|
|
update_model_ids(UserCount, data, "analytics_usercount")
|
2019-01-30 08:54:29 +01:00
|
|
|
bulk_import_model(data, UserCount)
|
|
|
|
|
2021-02-12 08:20:45 +01:00
|
|
|
fix_datetime_fields(data, "analytics_streamcount")
|
|
|
|
re_map_foreign_keys(data, "analytics_streamcount", "realm", related_table="realm")
|
|
|
|
re_map_foreign_keys(data, "analytics_streamcount", "stream", related_table="stream")
|
|
|
|
update_model_ids(StreamCount, data, "analytics_streamcount")
|
2019-01-30 08:54:29 +01:00
|
|
|
bulk_import_model(data, StreamCount)
|
2021-10-18 16:42:20 +02:00
|
|
|
|
|
|
|
|
2022-07-19 12:54:25 +02:00
|
|
|
def add_users_to_system_user_groups(
|
2024-04-16 16:05:43 +02:00
|
|
|
realm: Realm,
|
2024-07-12 02:30:17 +02:00
|
|
|
user_profiles: list[UserProfile],
|
|
|
|
role_system_groups_dict: dict[int, NamedUserGroup],
|
2021-10-18 16:42:20 +02:00
|
|
|
) -> None:
|
2024-04-02 18:39:18 +02:00
|
|
|
full_members_system_group = NamedUserGroup.objects.get(
|
2023-09-21 13:06:39 +02:00
|
|
|
name=SystemGroups.FULL_MEMBERS,
|
2021-10-18 16:42:20 +02:00
|
|
|
realm=realm,
|
|
|
|
is_system_group=True,
|
|
|
|
)
|
|
|
|
|
|
|
|
usergroup_memberships = []
|
|
|
|
for user_profile in user_profiles:
|
|
|
|
user_group = role_system_groups_dict[user_profile.role]
|
|
|
|
usergroup_memberships.append(
|
|
|
|
UserGroupMembership(user_profile=user_profile, user_group=user_group)
|
|
|
|
)
|
|
|
|
if user_profile.role == UserProfile.ROLE_MEMBER and not user_profile.is_provisional_member:
|
|
|
|
usergroup_memberships.append(
|
|
|
|
UserGroupMembership(user_profile=user_profile, user_group=full_members_system_group)
|
|
|
|
)
|
|
|
|
UserGroupMembership.objects.bulk_create(usergroup_memberships)
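# For example (hypothetical user): an imported ROLE_MEMBER who is past the
# realm's waiting period is added both to their role's system group and to the
# SystemGroups.FULL_MEMBERS group; users with other roles only get their role
# group from this function.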
|
2023-07-12 19:13:17 +02:00
|
|
|
now = timezone_now()
|
|
|
|
RealmAuditLog.objects.bulk_create(
|
|
|
|
RealmAuditLog(
|
|
|
|
realm=realm,
|
|
|
|
modified_user=membership.user_profile,
|
2024-04-17 16:34:39 +02:00
|
|
|
modified_user_group=membership.user_group.named_user_group,
|
2024-09-06 17:06:03 +02:00
|
|
|
event_type=AuditLogEventType.USER_GROUP_DIRECT_USER_MEMBERSHIP_ADDED,
|
2023-07-12 19:13:17 +02:00
|
|
|
event_time=now,
|
|
|
|
acting_user=None,
|
|
|
|
)
|
|
|
|
for membership in usergroup_memberships
|
|
|
|
)
|