import functools
import heapq
import logging
from collections import defaultdict
from collections.abc import Collection, Iterator
from datetime import datetime, timedelta, timezone
from typing import Any, TypeAlias

from django.conf import settings
from django.db import transaction
from django.db.models import Exists, OuterRef, QuerySet
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _

from confirmation.models import one_click_unsubscribe_link
from zerver.context_processors import common_context
from zerver.lib.email_notifications import build_message_list
from zerver.lib.logging_util import log_to_file
from zerver.lib.message import get_last_message_id
from zerver.lib.queue import queue_json_publish
from zerver.lib.send_email import FromAddress, send_future_email
from zerver.lib.url_encoding import stream_narrow_url
from zerver.models import (
    Message,
    Realm,
    RealmAuditLog,
    Recipient,
    Stream,
    Subscription,
    UserActivityInterval,
    UserProfile,
)
from zerver.models.realm_audit_logs import AuditLogEventType
from zerver.models.streams import get_active_streams

logger = logging.getLogger(__name__)
log_to_file(logger, settings.DIGEST_LOG_PATH)

DIGEST_CUTOFF = 5
MAX_HOT_TOPICS_TO_BE_INCLUDED_IN_DIGEST = 4

TopicKey: TypeAlias = tuple[int, str]


class DigestTopic:
    def __init__(self, topic_key: TopicKey) -> None:
        self.topic_key = topic_key
        self.human_senders: set[str] = set()
        self.sample_messages: list[Message] = []
        self.num_human_messages = 0

    def stream_id(self) -> int:
        # topic_key is (stream_id, topic_name)
        return self.topic_key[0]

    def add_message(self, message: Message) -> None:
        if len(self.sample_messages) < 2:
            self.sample_messages.append(message)

        if not message.sender.is_bot:
            self.human_senders.add(message.sender.full_name)
            self.num_human_messages += 1

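    # length() and diversity() are the ranking keys that get_hot_topics()
    # uses below to decide which topics are "hot" enough for the digest.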
    def length(self) -> int:
        return self.num_human_messages

    def diversity(self) -> int:
        return len(self.human_senders)

    def teaser_data(self, user: UserProfile, stream_id_map: dict[int, Stream]) -> dict[str, Any]:
        teaser_count = self.num_human_messages - len(self.sample_messages)
        first_few_messages = build_message_list(
            user=user,
            messages=self.sample_messages,
            stream_id_map=stream_id_map,
        )
        return {
            "participants": sorted(self.human_senders),
            "count": teaser_count,
            "first_few_messages": first_few_messages,
        }


# Digests accumulate 2 types of interesting traffic for a user:
# 1. New streams
# 2. Interesting stream traffic, as determined by the longest and most
#    diversely commented-upon topics.


# Changes to this should also be reflected in
# zerver/worker/digest_emails.py:DigestWorker.consume()
def queue_digest_user_ids(user_ids: list[int], cutoff: datetime) -> None:
    # Convert cutoff to epoch seconds for transit.
    event = {"user_ids": user_ids, "cutoff": cutoff.strftime("%s")}
    queue_json_publish("digest_emails", event)


def enqueue_emails(cutoff: datetime) -> None:
    if not settings.SEND_DIGEST_EMAILS:
        return

    weekday = timezone_now().weekday()
    for realm in Realm.objects.filter(
        deactivated=False, digest_emails_enabled=True, digest_weekday=weekday
    ).exclude(string_id__in=settings.SYSTEM_ONLY_REALMS):
        _enqueue_emails_for_realm(realm, cutoff)


def _enqueue_emails_for_realm(realm: Realm, cutoff: datetime) -> None:
    # This should only be called directly by tests. Use enqueue_emails
    # to process all realms that are set up for processing on any given day.
    twelve_hours_ago = timezone_now() - timedelta(hours=12)

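    # Digest candidates: active, human users with digest emails enabled,
    # no activity recorded since the cutoff, and no digest already recorded
    # (per RealmAuditLog) within the last twelve hours.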
    target_users = (
        UserProfile.objects.filter(
            realm=realm,
            is_active=True,
            is_bot=False,
            enable_digest_emails=True,
        )
        .alias(
            recent_activity=Exists(
                UserActivityInterval.objects.filter(user_profile_id=OuterRef("id"), end__gt=cutoff)
            )
        )
        .filter(recent_activity=False)
        .alias(
            sent_recent_digest=Exists(
                RealmAuditLog.objects.filter(
                    realm_id=realm.id,
                    event_type=AuditLogEventType.USER_DIGEST_EMAIL_CREATED,
                    event_time__gt=twelve_hours_ago,
                    modified_user_id=OuterRef("id"),
                )
            )
        )
        .filter(sent_recent_digest=False)
    )

    user_ids = target_users.order_by("id").values_list("id", flat=True)

    # We process batches of 30. We want a big enough batch
    # to amortize work, but not so big that a single item
    # from the queue takes too long to process.
    chunk_size = 30
    for i in range(0, len(user_ids), chunk_size):
        chunk_user_ids = list(user_ids[i : i + chunk_size])
        queue_digest_user_ids(chunk_user_ids, cutoff)
        logger.info(
            "Queuing user_ids for potential digest: %s",
            chunk_user_ids,
        )


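# Module-level memory of the last (realm_id, cutoff) pair processed, used by
# maybe_clear_recent_topics_cache() below to decide when to drop the
# get_recent_topics LRU cache.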
last_realm_id: int | None = None
last_cutoff: float | None = None


def maybe_clear_recent_topics_cache(realm_id: int, cutoff: float) -> None:
    # As an optimization, we clear the digest caches when we switch to
    # a new realm or cutoff value. Since these values are part of the
    # cache key, this is not necessary for correctness -- it merely
    # helps reduce the memory footprint of the cache.
    global last_realm_id, last_cutoff
    if last_realm_id != realm_id or last_cutoff != cutoff:
        logger.info("Flushing stream cache: %s", get_recent_topics.cache_info())
        get_recent_topics.cache_clear()
    last_realm_id = realm_id
    last_cutoff = cutoff


# We cache both by stream-id and cutoff, which ensures the per-stream
# cache also does not contain data from old digests
@functools.lru_cache(maxsize=5000)
def get_recent_topics(
    realm_id: int,
    stream_id: int,
    cutoff_date: datetime,
) -> list[DigestTopic]:
    # Gather information about topic conversations, then
    # classify by:
    # * topic length
    # * number of senders
    messages = (
        # Uses index: zerver_message_realm_recipient_date_sent
        Message.objects.filter(
            realm_id=realm_id,
            recipient__type=Recipient.STREAM,
            recipient__type_id=stream_id,
            date_sent__gt=cutoff_date,
        )
        .order_by(
            "id",  # we will sample the first few messages
        )
        .select_related(
            "recipient",  # build_message_list looks up recipient.type
            "sender",  # we need the sender's full name
        )
        .defer(
            # This construction, to only fetch the sender's full_name and is_bot,
            # is because `.only()` doesn't work with select_related tables.
            *{
                f"sender__{f.name}"
                for f in UserProfile._meta.fields
                if f.name not in {"full_name", "is_bot"}
            }
        )
    )

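    # Group the messages by (stream_id, topic_name), collecting sample
    # messages and per-topic counts of human senders/messages.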
    digest_topic_map: dict[TopicKey, DigestTopic] = {}
    for message in messages:
        topic_key = (stream_id, message.topic_name())

        if topic_key not in digest_topic_map:
            digest_topic_map[topic_key] = DigestTopic(topic_key)

        digest_topic_map[topic_key].add_message(message)

    topics = list(digest_topic_map.values())

    return topics


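# Take the two topics with the most distinct human senders, then fill the
# remaining slots (up to MAX_HOT_TOPICS_TO_BE_INCLUDED_IN_DIGEST) with the
# longest topics.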
def get_hot_topics(
    all_topics: list[DigestTopic],
    stream_ids: set[int],
) -> list[DigestTopic]:
    topics = [topic for topic in all_topics if topic.stream_id() in stream_ids]

    hot_topics = heapq.nlargest(2, topics, key=DigestTopic.diversity)

    for topic in heapq.nlargest(
        MAX_HOT_TOPICS_TO_BE_INCLUDED_IN_DIGEST, topics, key=DigestTopic.length
    ):
        if topic not in hot_topics:
            hot_topics.append(topic)
        if len(hot_topics) == MAX_HOT_TOPICS_TO_BE_INCLUDED_IN_DIGEST:
            break

    return hot_topics


def get_recently_created_streams(realm: Realm, threshold: datetime) -> list[Stream]:
    fields = ["id", "name", "is_web_public", "invite_only"]
    return list(get_active_streams(realm).filter(date_created__gt=threshold).only(*fields))


def gather_new_streams(
    realm: Realm,
    recently_created_streams: list[Stream],  # streams only need id and name
    can_access_public: bool,
) -> tuple[int, dict[str, list[str]]]:
    if can_access_public:
        new_streams = [stream for stream in recently_created_streams if not stream.invite_only]
    else:
        new_streams = [stream for stream in recently_created_streams if stream.is_web_public]

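    # Build both an HTML (linked) and a plain-text list of the new channels.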
    channels_html = []
    channels_plain = []

    for stream in new_streams:
        narrow_url = stream_narrow_url(realm, stream)
        channel_link = f"<a href='{narrow_url}'>{stream.name}</a>"
        channels_html.append(channel_link)
        channels_plain.append(stream.name)

    return len(new_streams), {"html": channels_html, "plain": channels_plain}


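# A digest is only worth sending if it has at least one hot conversation or
# at least one new stream.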
def enough_traffic(hot_conversations: list[dict[str, Any]], new_streams: int) -> bool:
    return bool(hot_conversations or new_streams)


def get_user_stream_map(user_ids: list[int], cutoff_date: datetime) -> dict[int, set[int]]:
    """Skipping streams where the user's subscription status has changed
    when constructing digests is critical to ensure correctness for
    streams without shared history, guest users, and long-term idle
    users, because it means that every user has the same view of the
    history of a given stream whose message history is being included
    (and thus we can share a lot of work).

    The downside is that newly created streams are never included in
    the first digest email after their creation. Should we wish to
    change that, we will need to be very careful to avoid creating
    bugs for any of those classes of users.
    """
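    # Subscription-change event types that disqualify a stream for this
    # user's digest (see the was_modified filter below).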
    events = [
        AuditLogEventType.SUBSCRIPTION_CREATED,
        AuditLogEventType.SUBSCRIPTION_ACTIVATED,
        AuditLogEventType.SUBSCRIPTION_DEACTIVATED,
    ]
    # This uses the zerver_realmauditlog_user_subscriptions_idx
    # partial index on RealmAuditLog which is specifically for those
    # three event types.
    rows = (
        Subscription.objects.filter(
            user_profile_id__in=user_ids,
            recipient__type=Recipient.STREAM,
            active=True,
            is_muted=False,
        )
        .alias(
            was_modified=Exists(
                RealmAuditLog.objects.filter(
                    modified_stream_id=OuterRef("recipient__type_id"),
                    modified_user_id=OuterRef("user_profile_id"),
                    event_time__gt=cutoff_date,
                    event_type__in=events,
                )
            )
        )
        .filter(was_modified=False)
        .values("user_profile_id", "recipient__type_id")
    )

    # maps user_id -> {stream_id, stream_id, ...}
    dct: dict[int, set[int]] = defaultdict(set)
    for row in rows:
        dct[row["user_profile_id"]].add(row["recipient__type_id"])

    return dct


def get_slim_stream_id_map(realm: Realm) -> dict[int, Stream]:
    # "slim" because it only fetches the names of the stream objects,
    # suitable for passing into build_message_list.
    streams = get_active_streams(realm).only("id", "name")
    return {stream.id: stream for stream in streams}


def bulk_get_digest_context(
    users: Collection[UserProfile] | QuerySet[UserProfile], cutoff: float
) -> Iterator[tuple[UserProfile, dict[str, Any]]]:
    # We expect a non-empty list of users all from the same realm.
    assert users
    realm = next(iter(users)).realm
    for user in users:
        assert user.realm_id == realm.id

    # Convert from epoch seconds to a datetime object.
    cutoff_date = datetime.fromtimestamp(int(cutoff), tz=timezone.utc)

    maybe_clear_recent_topics_cache(realm.id, cutoff)

    stream_id_map = get_slim_stream_id_map(realm)
    recently_created_streams = get_recently_created_streams(realm, cutoff_date)

    user_ids = [user.id for user in users]
    user_stream_map = get_user_stream_map(user_ids, cutoff_date)

    for user in users:
        stream_ids = user_stream_map[user.id]

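        # Collect the recent topics for each stream this user will see.
        # get_recent_topics() is cached per (realm, stream, cutoff), so this
        # work is shared across users of the same streams.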
        recent_topics = []
        for stream_id in stream_ids:
            recent_topics += get_recent_topics(realm.id, stream_id, cutoff_date)

        hot_topics = get_hot_topics(recent_topics, stream_ids)

        context = common_context(user)

        # Start building email template data.
        unsubscribe_link = one_click_unsubscribe_link(user, "digest")
        context.update(unsubscribe_link=unsubscribe_link)

        # Get context data for hot conversations.
        context["hot_conversations"] = [
            hot_topic.teaser_data(user, stream_id_map) for hot_topic in hot_topics
        ]

        # Gather new streams.
        new_streams_count, new_streams = gather_new_streams(
            realm=realm,
            recently_created_streams=recently_created_streams,
            can_access_public=user.can_access_public_streams(),
        )
        context["new_channels"] = new_streams
        context["new_streams_count"] = new_streams_count

        yield user, context


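# Convenience wrapper that builds the digest context for a single user.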
def get_digest_context(user: UserProfile, cutoff: float) -> dict[str, Any]:
    for ignored, context in bulk_get_digest_context([user], cutoff):
        return context
    raise AssertionError("Unreachable")


@transaction.atomic(durable=True)
def bulk_handle_digest_email(user_ids: list[int], cutoff: float) -> None:
    # We go directly to the database to get user objects,
    # since inactive users are likely to not be in the cache.
    users = (
        UserProfile.objects.filter(id__in=user_ids, is_active=True, realm__deactivated=False)
        .order_by("id")
        .select_related("realm")
    )
    digest_users = []

    for user, context in bulk_get_digest_context(users, cutoff):
        # We don't want to send emails containing almost no information.
        if not enough_traffic(context["hot_conversations"], context["new_streams_count"]):
            continue

        digest_users.append(user)
        logger.info("Sending digest email for user %s", user.id)

        # Send now, as a ScheduledEmail
        send_future_email(
            "zerver/emails/digest",
            user.realm,
            to_user_ids=[user.id],
            from_name=_("{service_name} digest").format(service_name=settings.INSTALLATION_NAME),
            from_address=FromAddress.no_reply_placeholder,
            context=context,
        )

    bulk_write_realm_audit_logs(digest_users)


def bulk_write_realm_audit_logs(users: list[UserProfile]) -> None:
    if not users:
        return

    # We write RealmAuditLog rows for auditing, and we will also
    # use these rows during the next run to possibly exclude the
    # users (if insufficient time has passed).
    last_message_id = get_last_message_id()
    now = timezone_now()

    log_rows = [
        RealmAuditLog(
            realm_id=user.realm_id,
            modified_user_id=user.id,
            event_last_message_id=last_message_id,
            event_time=now,
            event_type=AuditLogEventType.USER_DIGEST_EMAIL_CREATED,
        )
        for user in users
    ]

    RealmAuditLog.objects.bulk_create(log_rows)