# See https://zulip.readthedocs.io/en/latest/subsystems/notifications.html

import logging
import os
import re
import subprocess
import sys
import zoneinfo
from collections import defaultdict
from dataclasses import dataclass
from datetime import timedelta
from email.headerregistry import Address
from typing import Any

import lxml.html
from bs4 import BeautifulSoup
from django.conf import settings
from django.contrib.auth import get_backends
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.utils.translation import override as override_language
from lxml.html import builder as e

from confirmation.models import one_click_unsubscribe_link
from zerver.lib.display_recipient import get_display_recipient
from zerver.lib.markdown.fenced_code import FENCE_RE
from zerver.lib.message import bulk_access_messages
from zerver.lib.notification_data import get_mentioned_user_group
from zerver.lib.queue import queue_json_publish
from zerver.lib.send_email import FromAddress, send_future_email
from zerver.lib.soft_deactivation import soft_reactivate_if_personal_notification
from zerver.lib.tex import change_katex_to_raw_latex
from zerver.lib.timezone import canonicalize_timezone
from zerver.lib.topic import get_topic_resolution_and_bare_name
from zerver.lib.url_encoding import (
    direct_message_group_narrow_url,
    personal_narrow_url,
    stream_narrow_url,
    topic_narrow_url,
)
from zerver.models import Message, Realm, Recipient, Stream, UserMessage, UserProfile
from zerver.models.messages import get_context_for_message
from zerver.models.scheduled_jobs import NotificationTriggers
from zerver.models.users import get_user_profile_by_id

logger = logging.getLogger(__name__)

def relative_to_full_url(fragment: lxml.html.HtmlElement, base_url: str) -> None:
    # We handle narrow URLs separately for two reasons:
    # 1: 'lxml' seems to have an issue dealing with URLs that begin
    # with `#`, due to which it doesn't add a `/` before joining the
    # base_url to the relative URL.
    # 2: We also need to update the title attribute in the narrow links,
    # which is not possible with `make_links_absolute()`.
    for link_info in fragment.iterlinks():
        elem, attrib, link, pos = link_info
        match = re.match(r"/?#narrow/", link)
        if match is not None:
            link = re.sub(r"^/?#narrow/", base_url + "/#narrow/", link)
            elem.set(attrib, link)
            # Only manually linked narrow URLs have the title attribute set.
            if elem.get("title") is not None:
                elem.set("title", link)

    # Because we were parsed with fragment_fromstring, we are
    # guaranteed there is a top-level <div>, and the original
    # top-level contents are within that.
    if len(fragment) == 1 and fragment[0].get("class") == "message_inline_image":
        # The next block handles most inline images, but for messages
        # where the entire Markdown input was just the URL of an image
        # (i.e. the entire body is a message_inline_image object), the
        # entire message body will be that image element; here, we need a
        # more drastic edit to the content.
        inner = fragment[0]
        image_link = inner.find("a").get("href")
        image_title = inner.find("a").get("title")
        title_attr = {} if image_title is None else {"title": image_title}
        inner.clear()
        inner.tag = "p"
        inner.append(e.A(image_link, href=image_link, target="_blank", **title_attr))
    else:
        # Inline images can't be displayed in the emails, as the request
        # from the mail server can't be authenticated because it has no
        # user_profile object linked to it. So we scrub the inline image
        # container.
        inline_image_containers = fragment.find_class("message_inline_image")
        for container in inline_image_containers:
            container.drop_tree()

    fragment.make_links_absolute(base_url)
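
# For example (illustrative values): with base_url "https://chat.example.com",
# a narrow link "#narrow/channel/1-general" is rewritten to
# "https://chat.example.com/#narrow/channel/1-general", and any remaining
# relative link such as "/user_uploads/image.png" is absolutized by
# make_links_absolute().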


def fix_emojis(fragment: lxml.html.HtmlElement, emojiset: str) -> None:
    def make_emoji_img_elem(emoji_span_elem: lxml.html.HtmlElement) -> lxml.html.HtmlElement:
        # Convert the emoji spans to img tags.
        classes = emoji_span_elem.get("class")
        match = re.search(r"emoji-(?P<emoji_code>\S+)", classes)
        # re.search is capable of returning None, but since this function
        # should only be called with a valid emoji span element, we assert
        # that it does not.
        assert match is not None
        emoji_code = match.group("emoji_code")
        emoji_name = emoji_span_elem.get("title")
        alt_code = emoji_span_elem.text
        # We intentionally do not use staticfiles_storage.url here, so
        # that we don't get any hashed version -- we want a path which
        # may give us content which changes over time, but one which
        # is guaranteed to keep working even if the prod-static
        # directory is cleaned out (or a new server is rotated in
        # which does not have historical content with old hashed
        # filenames).
        image_url = f"{settings.STATIC_URL}generated/emoji/images-{emojiset}-64/{emoji_code}.png"
        img_elem = e.IMG(
            alt=alt_code,
            src=image_url,
            title=emoji_name,
            # We specify dimensions with these attributes, rather than
            # CSS, because Outlook doesn't support these CSS properties.
            height="20",
            width="20",
        )
        img_elem.tail = emoji_span_elem.tail
        return img_elem

    for elem in fragment.cssselect("span.emoji"):
        parent = elem.getparent()
        img_elem = make_emoji_img_elem(elem)
        parent.replace(elem, img_elem)

    for realm_emoji in fragment.cssselect("img.emoji"):
        del realm_emoji.attrib["class"]
        # See above note about Outlook.
        realm_emoji.set("height", "20")
        realm_emoji.set("width", "20")
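
# For example (illustrative values): a rendered span like
#   <span class="emoji emoji-1f389" title="tada">:tada:</span>
# becomes, for emojiset "google", roughly
#   <img alt=":tada:" title="tada" height="20" width="20"
#        src="{STATIC_URL}generated/emoji/images-google-64/1f389.png">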


def fix_spoilers_in_html(fragment: lxml.html.HtmlElement, language: str) -> None:
    with override_language(language):
        spoiler_title: str = _("Open Zulip to see the spoiler content")
    spoilers = fragment.find_class("spoiler-block")
    for spoiler in spoilers:
        header = spoiler.find_class("spoiler-header")[0]
        spoiler_content = spoiler.find_class("spoiler-content")[0]
        header_content = header.find("p")
        if header_content is None:
            # Create a new element to append the spoiler title to.
            header_content = e.P()
            header.append(header_content)
        else:
            # Add a space.
            rear = header_content[-1] if len(header_content) else header_content
            rear.tail = (rear.tail or "") + " "
        span_elem = e.SPAN(f"({spoiler_title})", **e.CLASS("spoiler-title"), title=spoiler_title)
        header_content.append(span_elem)
        header.drop_tag()
        spoiler_content.drop_tree()
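
# For example, a spoiler block whose header is "<p>A hint</p>" is reduced to
# roughly "<p>A hint (Open Zulip to see the spoiler content)</p>", and the
# hidden spoiler-content subtree is dropped entirely.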


def fix_spoilers_in_text(content: str, language: str) -> str:
    with override_language(language):
        spoiler_title: str = _("Open Zulip to see the spoiler content")
    lines = content.split("\n")
    output = []
    open_fence = None
    for line in lines:
        m = FENCE_RE.match(line)
        if m:
            fence = m.group("fence")
            lang: str | None = m.group("lang")
            if lang == "spoiler":
                open_fence = fence
                output.append(line)
                output.append(f"({spoiler_title})")
            elif fence == open_fence:
                open_fence = None
                output.append(line)
        elif not open_fence:
            output.append(line)
    return "\n".join(output)
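
# For example, the Markdown input
#   ```spoiler The hint
#   the hidden content
#   ```
# becomes
#   ```spoiler The hint
#   (Open Zulip to see the spoiler content)
#   ```
# while lines outside spoiler fences pass through unchanged.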


def add_quote_prefix_in_text(content: str) -> str:
    """
    Prepend the quote prefix "> " to each line of the message in plain-text
    format, so that email clients render the message as a quote.
    """
    lines = content.split("\n")
    output = []
    for line in lines:
        quoted_line = f"> {line}"
        output.append(quoted_line)
    return "\n".join(output)
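
# For example, add_quote_prefix_in_text("first line\nsecond line") returns
# "> first line\n> second line".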


def build_message_list(
    user: UserProfile,
    messages: list[Message],
    stream_id_map: dict[int, Stream] | None = None,  # only needs id, name
) -> list[dict[str, Any]]:
    """
    Builds the message list object for the message notification email template.
    The messages are collapsed into per-recipient and per-sender blocks, like
    our web interface.
    """
    messages_to_render: list[dict[str, Any]] = []

    def sender_string(message: Message) -> str:
        if message.recipient.type in (Recipient.STREAM, Recipient.DIRECT_MESSAGE_GROUP):
            return message.sender.full_name
        else:
            return ""

    def fix_plaintext_image_urls(content: str) -> str:
        # Replace image URLs in plaintext content of the form
        #     [image name](image url)
        # with a simple hyperlink.
        return re.sub(r"\[(\S*)\]\((\S*)\)", r"\2", content)

    def prepend_sender_to_message(
        message_plain: str, message_html: str, sender: str
    ) -> tuple[str, str]:
        message_plain = f"{sender}:\n{message_plain}"
        message_soup = BeautifulSoup(message_html, "html.parser")
        sender_name_soup = BeautifulSoup(f"<b>{sender}</b>: ", "html.parser")
        first_tag = message_soup.find()
        if first_tag and first_tag.name == "div":
            first_tag = first_tag.find()
        if first_tag and first_tag.name == "p":
            first_tag.insert(0, sender_name_soup)
        else:
            message_soup.insert(0, sender_name_soup)
        return message_plain, str(message_soup)

    def build_message_payload(message: Message, sender: str | None = None) -> dict[str, str]:
        plain = message.content
        plain = fix_plaintext_image_urls(plain)
        # There's a small chance of colliding with non-Zulip URLs containing
        # "/user_uploads/", but we don't have much information about the
        # structure of the URL to leverage. We can't use the
        # `relative_to_full_url()` function here because it uses a stricter
        # regex which will not work for plain text.
        plain = re.sub(r"/user_uploads/(\S*)", user.realm.url + r"/user_uploads/\1", plain)
        plain = fix_spoilers_in_text(plain, user.default_language)
        plain = add_quote_prefix_in_text(plain)

        assert message.rendered_content is not None
        fragment = lxml.html.fragment_fromstring(message.rendered_content, create_parent=True)
        relative_to_full_url(fragment, user.realm.url)
        fix_emojis(fragment, user.emojiset)
        fix_spoilers_in_html(fragment, user.default_language)
        change_katex_to_raw_latex(fragment)

        html = lxml.html.tostring(fragment, encoding="unicode")
        if sender:
            plain, html = prepend_sender_to_message(plain, html, sender)
        return {"plain": plain, "html": html}

    def build_sender_payload(message: Message) -> dict[str, Any]:
        sender = sender_string(message)
        return {"sender": sender, "content": [build_message_payload(message, sender)]}

    def message_header(message: Message) -> dict[str, Any]:
        if message.recipient.type == Recipient.PERSONAL:
            grouping: dict[str, Any] = {"user": message.sender_id}
            narrow_link = personal_narrow_url(
                realm=user.realm,
                sender=message.sender,
            )
            header = f"You and {message.sender.full_name}"
            header_html = f"<a style='color: #ffffff;' href='{narrow_link}'>{header}</a>"
        elif message.recipient.type == Recipient.DIRECT_MESSAGE_GROUP:
            grouping = {"huddle": message.recipient_id}
            display_recipient = get_display_recipient(message.recipient)
            narrow_link = direct_message_group_narrow_url(
                user=user,
                display_recipient=display_recipient,
            )
            other_recipients = [r["full_name"] for r in display_recipient if r["id"] != user.id]
            header = "You and {}".format(", ".join(other_recipients))
            header_html = f"<a style='color: #ffffff;' href='{narrow_link}'>{header}</a>"
        else:
            assert message.recipient.type == Recipient.STREAM
            grouping = {"stream": message.recipient_id, "topic": message.topic_name().lower()}
            stream_id = message.recipient.type_id
            if stream_id_map is not None and stream_id in stream_id_map:
                stream = stream_id_map[stream_id]
            else:
                # Some of our callers don't populate stream_id_map, so
                # we just populate the stream from the database.
                stream = Stream.objects.only("id", "name").get(id=stream_id)
            narrow_link = topic_narrow_url(
                realm=user.realm,
                stream=stream,
                topic_name=message.topic_name(),
            )
            header = f"{stream.name} > {message.topic_name()}"
            stream_link = stream_narrow_url(user.realm, stream)
            header_html = f"<a href='{stream_link}'>{stream.name}</a> > <a href='{narrow_link}'>{message.topic_name()}</a>"
        return {
            "grouping": grouping,
            "plain": header,
            "html": header_html,
            "stream_message": message.recipient.type_name() == "stream",
        }

    # # Collapse message list to
    # [
    #    {
    #       "header": {
    #                      "plain":"header",
    #                      "html":"htmlheader"
    #                  }
    #       "senders":[
    #          {
    #             "sender":"sender_name",
    #             "content":[
    #                {
    #                   "plain":"content",
    #                   "html":"htmlcontent"
    #                }
    #                {
    #                   "plain":"content",
    #                   "html":"htmlcontent"
    #                }
    #             ]
    #          }
    #       ]
    #    },
    # ]

    messages.sort(key=lambda message: message.date_sent)

    for message in messages:
        header = message_header(message)

        # If we want to collapse into the previous recipient block
        if (
            len(messages_to_render) > 0
            and messages_to_render[-1]["header"]["grouping"] == header["grouping"]
        ):
            sender = sender_string(message)
            sender_block = messages_to_render[-1]["senders"]

            # Same message sender, collapse again
            if sender_block[-1]["sender"] == sender:
                sender_block[-1]["content"].append(build_message_payload(message))
            else:
                # Start a new sender block
                sender_block.append(build_sender_payload(message))
        else:
            # New recipient and sender block
            recipient_block = {"header": header, "senders": [build_sender_payload(message)]}

            messages_to_render.append(recipient_block)

    return messages_to_render
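
# For example, three unread messages in the same channel topic, two from Alice
# followed by one from Bob, collapse into a single recipient block with one
# header and two sender blocks: Alice's with two content payloads, then Bob's
# with one.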


def message_content_allowed_in_missedmessage_emails(user_profile: UserProfile) -> bool:
    return (
        user_profile.realm.message_content_allowed_in_email_notifications
        and user_profile.message_content_in_email_notifications
    )


def include_realm_name_in_missedmessage_emails_subject(user_profile: UserProfile) -> bool:
    # Determines whether to include the realm name in the subject line
    # of missedmessage email notifications, based on the user's
    # realm_name_in_email_notifications_policy settings and whether the
    # user's delivery_email is associated with other active realms.
    if (
        user_profile.realm_name_in_email_notifications_policy
        == UserProfile.REALM_NAME_IN_EMAIL_NOTIFICATIONS_POLICY_AUTOMATIC
    ):
        realms_count = UserProfile.objects.filter(
            delivery_email=user_profile.delivery_email,
            is_active=True,
            is_bot=False,
            realm__deactivated=False,
        ).count()
        return realms_count > 1
    return (
        user_profile.realm_name_in_email_notifications_policy
        == UserProfile.REALM_NAME_IN_EMAIL_NOTIFICATIONS_POLICY_ALWAYS
    )
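
# For example, under the AUTOMATIC policy, a user whose delivery_email has
# active (non-bot) accounts in two different realms gets the realm name in the
# subject line, while a user with a single account does not; under the ALWAYS
# policy, the realm name is included unconditionally.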


def do_send_missedmessage_events_reply_in_zulip(
    user_profile: UserProfile, missed_messages: list[dict[str, Any]], message_count: int
) -> None:
    """
    Send a reminder email to a user if they've missed some direct messages
    by being offline.

    The email will have its reply-to address set to a limited-use email
    address that will send a Zulip message to the correct recipient. This
    allows the user to respond to missed direct messages, direct message
    groups, and @-mentions directly from the email.

    `user_profile` is the user to send the reminder to
    `missed_messages` is a list of dictionaries containing Message objects
                      and other data for a group of messages that share a
                      recipient (and topic)
    """
    from zerver.context_processors import common_context

    recipients = {
        (msg["message"].recipient_id, msg["message"].topic_name().lower())
        for msg in missed_messages
    }
    assert len(recipients) == 1, f"Unexpectedly multiple recipients: {recipients!r}"

    # This link is no longer a part of the email, but keeping the code in case
    # we find a clean way to add it back in the future
    unsubscribe_link = one_click_unsubscribe_link(user_profile, "missed_messages")
    context = common_context(user_profile)
    context.update(
        name=user_profile.full_name,
        message_count=message_count,
        unsubscribe_link=unsubscribe_link,
        include_realm_name_in_missedmessage_emails_subject=include_realm_name_in_missedmessage_emails_subject(
            user_profile
        ),
    )

    mentioned_user_group_name = None
    mentioned_user_group_members_count = None
    mentioned_user_group = get_mentioned_user_group(missed_messages, user_profile)
    if mentioned_user_group is not None:
        mentioned_user_group_name = mentioned_user_group.name
        mentioned_user_group_members_count = mentioned_user_group.members_count

    triggers = [message["trigger"] for message in missed_messages]
    unique_triggers = set(triggers)

    personal_mentioned = any(
        message["trigger"] == NotificationTriggers.MENTION
        and message["mentioned_user_group_id"] is None
        for message in missed_messages
    )

    mention = (
        NotificationTriggers.MENTION in unique_triggers
        or NotificationTriggers.TOPIC_WILDCARD_MENTION in unique_triggers
        or NotificationTriggers.STREAM_WILDCARD_MENTION in unique_triggers
        or NotificationTriggers.TOPIC_WILDCARD_MENTION_IN_FOLLOWED_TOPIC in unique_triggers
        or NotificationTriggers.STREAM_WILDCARD_MENTION_IN_FOLLOWED_TOPIC in unique_triggers
    )

    context.update(
        mention=mention,
        personal_mentioned=personal_mentioned,
        topic_wildcard_mentioned=NotificationTriggers.TOPIC_WILDCARD_MENTION in unique_triggers,
        stream_wildcard_mentioned=NotificationTriggers.STREAM_WILDCARD_MENTION in unique_triggers,
        stream_email_notify=NotificationTriggers.STREAM_EMAIL in unique_triggers,
        followed_topic_email_notify=NotificationTriggers.FOLLOWED_TOPIC_EMAIL in unique_triggers,
        topic_wildcard_mentioned_in_followed_topic=NotificationTriggers.TOPIC_WILDCARD_MENTION_IN_FOLLOWED_TOPIC
        in unique_triggers,
        stream_wildcard_mentioned_in_followed_topic=NotificationTriggers.STREAM_WILDCARD_MENTION_IN_FOLLOWED_TOPIC
        in unique_triggers,
        mentioned_user_group_name=mentioned_user_group_name,
    )

    # Users can reply to this email to send a message to Zulip only if the
    # email gateway (email mirroring integration) is enabled; when it is
    # not, the template must display a warning instead.
    if settings.EMAIL_GATEWAY_PATTERN:
        context.update(
            reply_to_zulip=True,
        )
    else:
        context.update(
            reply_to_zulip=False,
        )

    from zerver.lib.email_mirror import create_missed_message_address

    reply_to_address = create_missed_message_address(user_profile, missed_messages[0]["message"])
    if reply_to_address == FromAddress.NOREPLY:
        reply_to_name = ""
    else:
        reply_to_name = "Zulip"

    senders = list({m["message"].sender for m in missed_messages})
    if missed_messages[0]["message"].recipient.type == Recipient.DIRECT_MESSAGE_GROUP:
        display_recipient = get_display_recipient(missed_messages[0]["message"].recipient)
        narrow_url = direct_message_group_narrow_url(
            user=user_profile,
            display_recipient=display_recipient,
        )
        context.update(narrow_url=narrow_url)
        other_recipients = [r["full_name"] for r in display_recipient if r["id"] != user_profile.id]
        context.update(group_pm=True)
        if len(other_recipients) == 2:
            direct_message_group_display_name = " and ".join(other_recipients)
            context.update(huddle_display_name=direct_message_group_display_name)
        elif len(other_recipients) == 3:
            direct_message_group_display_name = (
                f"{other_recipients[0]}, {other_recipients[1]}, and {other_recipients[2]}"
            )
            context.update(huddle_display_name=direct_message_group_display_name)
        else:
            direct_message_group_display_name = "{}, and {} others".format(
                ", ".join(other_recipients[:2]), len(other_recipients) - 2
            )
            context.update(huddle_display_name=direct_message_group_display_name)
    elif missed_messages[0]["message"].recipient.type == Recipient.PERSONAL:
        narrow_url = personal_narrow_url(
            realm=user_profile.realm,
            sender=missed_messages[0]["message"].sender,
        )
        context.update(narrow_url=narrow_url)
        context.update(private_message=True)
    elif (
        context["mention"]
        or context["stream_email_notify"]
        or context["followed_topic_email_notify"]
    ):
        # Keep only the senders who actually mentioned the user
        if context["mention"]:
            senders = list(
                {
                    m["message"].sender
                    for m in missed_messages
                    if m["trigger"]
                    in [
                        NotificationTriggers.MENTION,
                        NotificationTriggers.TOPIC_WILDCARD_MENTION,
                        NotificationTriggers.STREAM_WILDCARD_MENTION,
                        NotificationTriggers.TOPIC_WILDCARD_MENTION_IN_FOLLOWED_TOPIC,
                        NotificationTriggers.STREAM_WILDCARD_MENTION_IN_FOLLOWED_TOPIC,
                    ]
                }
            )
        message = missed_messages[0]["message"]
        assert message.recipient.type == Recipient.STREAM
        stream = Stream.objects.only("id", "name").get(id=message.recipient.type_id)
        narrow_url = topic_narrow_url(
            realm=user_profile.realm,
            stream=stream,
            topic_name=message.topic_name(),
        )
        context.update(narrow_url=narrow_url)
        topic_resolved, topic_name = get_topic_resolution_and_bare_name(message.topic_name())
        context.update(
            channel_name=stream.name,
            topic_name=topic_name,
            topic_resolved=topic_resolved,
        )
    else:
        raise AssertionError("Invalid messages!")

    # If message content is disabled, then flush all information we pass to email.
    if not message_content_allowed_in_missedmessage_emails(user_profile):
        realm = user_profile.realm
        context.update(
            reply_to_zulip=False,
            messages=[],
            sender_str="",
            realm_str=realm.name,
            huddle_display_name="",
            show_message_content=False,
            message_content_disabled_by_user=not user_profile.message_content_in_email_notifications,
            message_content_disabled_by_realm=not realm.message_content_allowed_in_email_notifications,
        )
    else:
        context.update(
            messages=build_message_list(
                user=user_profile,
                messages=[m["message"] for m in missed_messages],
            ),
            sender_str=", ".join(sender.full_name for sender in senders),
            realm_str=user_profile.realm.name,
            show_message_content=True,
        )

    # Soft reactivate the long_term_idle user personally mentioned
    soft_reactivate_if_personal_notification(
        user_profile, unique_triggers, mentioned_user_group_members_count
    )

    with override_language(user_profile.default_language):
        from_name: str = _("{service_name} notifications").format(
            service_name=settings.INSTALLATION_NAME
        )
    from_address = FromAddress.NOREPLY

    email_dict = {
        "template_prefix": "zerver/emails/missed_message",
        "to_user_ids": [user_profile.id],
        "from_name": from_name,
        "from_address": from_address,
        "reply_to_email": str(Address(display_name=reply_to_name, addr_spec=reply_to_address)),
        "context": context,
    }
    queue_json_publish("email_senders", email_dict)

    user_profile.last_reminder = timezone_now()
    user_profile.save(update_fields=["last_reminder"])


@dataclass
class MissedMessageData:
    trigger: str
    mentioned_user_group_id: int | None = None
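
# For example, a plain @-mention is represented as
# MissedMessageData(trigger=NotificationTriggers.MENTION), while a mention via
# a user group also carries that group's id in mentioned_user_group_id.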


def handle_missedmessage_emails(
    user_profile_id: int, message_ids: dict[int, MissedMessageData]
) -> None:
    user_profile = get_user_profile_by_id(user_profile_id)
    if user_profile.is_bot:  # nocoverage
        # We don't expect to reach here for bot users. However, this code exists
        # to find and throw away any pre-existing events in the queue while
        # upgrading from versions before our notifiability logic was implemented.
        # TODO/compatibility: This block can be removed when one can no longer
        # upgrade from versions <= 4.0 to versions >= 5.0
        logger.warning("Send-email event found for bot user %s. Skipping.", user_profile_id)
        return

    if not user_profile.enable_offline_email_notifications:
        # BUG: Investigate why it's possible to get here.
        return  # nocoverage

    # Note: This query structure automatically filters out any
    # messages that were permanently deleted, since those would now be
    # in the ArchivedMessage table, not the Message table.
    messages = Message.objects.filter(
        # Uses index: zerver_message_pkey
        usermessage__user_profile_id=user_profile,
        id__in=message_ids,
        usermessage__flags=~UserMessage.flags.read,
        # Cancel missed-message emails for deleted messages
    ).exclude(content="(deleted)")

    if not messages:
        return

    # We bucket messages by tuples that identify similar messages.
    # For streams it's recipient_id and topic.
    # For direct messages it's recipient id and sender.
    messages_by_bucket: dict[tuple[int, int | str], list[Message]] = defaultdict(list)
    for msg in messages:
        if msg.recipient.type == Recipient.PERSONAL:
            # For direct messages group using (recipient, sender).
            messages_by_bucket[(msg.recipient_id, msg.sender_id)].append(msg)
        else:
            messages_by_bucket[(msg.recipient_id, msg.topic_name().lower())].append(msg)

    message_count_by_bucket = {
        bucket_tup: len(msgs) for bucket_tup, msgs in messages_by_bucket.items()
    }

    for msg_list in messages_by_bucket.values():
        msg = min(msg_list, key=lambda msg: msg.date_sent)
        if msg.is_stream_message() and UserMessage.has_any_mentions(user_profile_id, msg.id):
            context_messages = get_context_for_message(msg)
            filtered_context_messages = bulk_access_messages(user_profile, context_messages)
            msg_list.extend(filtered_context_messages)

    # Sort emails by least recently-active discussion.
    bucket_tups: list[tuple[tuple[int, int | str], int]] = []
    for bucket_tup, msg_list in messages_by_bucket.items():
        max_message_id = max(msg_list, key=lambda msg: msg.id).id
        bucket_tups.append((bucket_tup, max_message_id))

    bucket_tups = sorted(bucket_tups, key=lambda x: x[1])

    # Send an email per bucket.
    for bucket_tup, ignored_max_id in bucket_tups:
        unique_messages = {}
        for m in messages_by_bucket[bucket_tup]:
            message_info = message_ids.get(m.id)
            unique_messages[m.id] = dict(
                message=m,
                trigger=message_info.trigger if message_info else None,
                mentioned_user_group_id=(
                    message_info.mentioned_user_group_id if message_info is not None else None
                ),
            )
        do_send_missedmessage_events_reply_in_zulip(
            user_profile,
            list(unique_messages.values()),
            message_count_by_bucket[bucket_tup],
        )
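
# For example, two unread direct messages from the same sender fall into one
# (recipient_id, sender_id) bucket and produce a single email, while unread
# channel messages in topics "lunch" and "Lunch" share one
# (recipient_id, "lunch") bucket because topics are lowercased.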


def get_onboarding_email_schedule(user: UserProfile) -> dict[str, timedelta]:
    onboarding_emails = {
        # The delay should be 1 hour before the below specified number of days
        # as our goal is to maximize the chance that this email is near the top
        # of the user's inbox when the user sits down to deal with their inbox,
        # or comes in while they are dealing with their inbox.
        "onboarding_zulip_topics": timedelta(days=2, hours=-1),
        "onboarding_zulip_guide": timedelta(days=4, hours=-1),
        "onboarding_team_to_zulip": timedelta(days=6, hours=-1),
    }

    user_tz = user.timezone
    if user_tz == "":
        user_tz = "UTC"
    signup_day = user.date_joined.astimezone(
        zoneinfo.ZoneInfo(canonicalize_timezone(user_tz))
    ).isoweekday()

    # General rules for scheduling the welcome emails flow:
    # - Do not send emails on Saturday or Sunday
    # - Have at least one weekday between each (potential) email

    # User signed up on Monday
    if signup_day == 1:
        # Send onboarding_team_to_zulip on Tuesday
        onboarding_emails["onboarding_team_to_zulip"] = timedelta(days=8, hours=-1)

    # User signed up on Tuesday
    if signup_day == 2:
        # Send onboarding_zulip_guide on Monday
        onboarding_emails["onboarding_zulip_guide"] = timedelta(days=6, hours=-1)
        # Send onboarding_team_to_zulip on Wednesday
        onboarding_emails["onboarding_team_to_zulip"] = timedelta(days=8, hours=-1)

    # User signed up on Wednesday
    if signup_day == 3:
        # Send onboarding_zulip_guide on Tuesday
        onboarding_emails["onboarding_zulip_guide"] = timedelta(days=6, hours=-1)
        # Send onboarding_team_to_zulip on Thursday
        onboarding_emails["onboarding_team_to_zulip"] = timedelta(days=8, hours=-1)

    # User signed up on Thursday
    if signup_day == 4:
        # Send onboarding_zulip_topics on Monday
        onboarding_emails["onboarding_zulip_topics"] = timedelta(days=4, hours=-1)
        # Send onboarding_zulip_guide on Wednesday
        onboarding_emails["onboarding_zulip_guide"] = timedelta(days=6, hours=-1)
        # Send onboarding_team_to_zulip on Friday
        onboarding_emails["onboarding_team_to_zulip"] = timedelta(days=8, hours=-1)

    # User signed up on Friday
    if signup_day == 5:
        # Send onboarding_zulip_topics on Tuesday
        onboarding_emails["onboarding_zulip_topics"] = timedelta(days=4, hours=-1)
        # Send onboarding_zulip_guide on Thursday
        onboarding_emails["onboarding_zulip_guide"] = timedelta(days=6, hours=-1)
        # Send onboarding_team_to_zulip on Monday
        onboarding_emails["onboarding_team_to_zulip"] = timedelta(days=10, hours=-1)

    # User signed up on Saturday; no adjustments needed

    # User signed up on Sunday
    if signup_day == 7:
        # Send onboarding_team_to_zulip on Monday
        onboarding_emails["onboarding_team_to_zulip"] = timedelta(days=8, hours=-1)

    return onboarding_emails
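
# For example, a user who signs up on a Thursday (signup_day == 4) is
# scheduled to receive onboarding_zulip_topics on Monday (days=4),
# onboarding_zulip_guide on Wednesday (days=6), and onboarding_team_to_zulip
# on Friday (days=8), each offset an hour earlier.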


def get_org_type_zulip_guide(realm: Realm) -> tuple[Any, str]:
    for realm_type, realm_type_details in Realm.ORG_TYPES.items():
        if realm_type_details["id"] == realm.org_type:
            organization_type_in_template = realm_type

            # There are two education organization types that receive the same email
            # content, so we simplify to one shared template context value here.
            if organization_type_in_template == "education_nonprofit":
                organization_type_in_template = "education"

            return (realm_type_details["onboarding_zulip_guide_url"], organization_type_in_template)

    # Log the problem, and return values that will not send the
    # onboarding_zulip_guide email.
    logging.error("Unknown organization type '%s'", realm.org_type)
    return (None, "")
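
# For example, a realm whose org_type matches "education_nonprofit" returns
# that type's onboarding_zulip_guide_url together with the simplified template
# value "education"; an unrecognized org_type yields (None, ""), which
# suppresses the onboarding_zulip_guide email.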


def welcome_sender_information() -> tuple[str | None, str]:
    if settings.WELCOME_EMAIL_SENDER is not None:
        from_name = settings.WELCOME_EMAIL_SENDER["name"]
        from_address = settings.WELCOME_EMAIL_SENDER["email"]
    else:
        from_name = None
        from_address = FromAddress.support_placeholder

    return (from_name, from_address)


def send_account_registered_email(user: UserProfile, realm_creation: bool = False) -> None:
    # Imported here to avoid import cycles.
    from zerver.context_processors import common_context

    if user.delivery_email == "":
        # Do not attempt to enqueue welcome emails for users without an email address.
        # The assertions here are to help document the only circumstance under which
        # this condition should be possible.
        assert user.realm.demo_organization_scheduled_deletion_date is not None and realm_creation
        return

    from_name, from_address = welcome_sender_information()
    realm_url = user.realm.url

    account_registered_context = common_context(user)
    account_registered_context.update(
        realm_creation=realm_creation,
        email=user.delivery_email,
        is_realm_admin=user.is_realm_admin,
        is_demo_organization=user.realm.demo_organization_scheduled_deletion_date is not None,
    )

    account_registered_context["getting_organization_started_link"] = (
        realm_url + "/help/moving-to-zulip"
    )

    account_registered_context["getting_user_started_link"] = (
        realm_url + "/help/getting-started-with-zulip"
    )

    # Imported here to avoid import cycles.
    from zproject.backends import ZulipLDAPAuthBackend, email_belongs_to_ldap

    if email_belongs_to_ldap(user.realm, user.delivery_email):
        account_registered_context["ldap"] = True
        for backend in get_backends():
            # If the user is authenticating via LDAP, record their LDAP
            # username. Note that we exclude ZulipLDAPUserPopulator here,
            # since that isn't used for authentication.
            if isinstance(backend, ZulipLDAPAuthBackend):
                account_registered_context["ldap_username"] = backend.django_to_ldap_username(
                    user.delivery_email
                )
                break

    send_future_email(
        "zerver/emails/account_registered",
        user.realm,
        to_user_ids=[user.id],
        from_name=from_name,
        from_address=from_address,
        context=account_registered_context,
    )


def enqueue_welcome_emails(user: UserProfile, realm_creation: bool = False) -> None:
    # Imported here to avoid import cycles.
    from zerver.context_processors import common_context

    if user.delivery_email == "":
        # Do not attempt to enqueue welcome emails for users without an email address.
        # The assertions here are to help document the only circumstance under which
        # this condition should be possible.
        assert user.realm.demo_organization_scheduled_deletion_date is not None and realm_creation
        return

    from_name, from_address = welcome_sender_information()
    other_account_count = (
        UserProfile.objects.filter(delivery_email__iexact=user.delivery_email)
        .exclude(id=user.id)
        .count()
    )
    unsubscribe_link = one_click_unsubscribe_link(user, "welcome")
    realm_url = user.realm.url

    # Any emails scheduled below should be added to the logic in get_onboarding_email_schedule
    # to determine how long to delay sending the email based on when the user signed up.
    onboarding_email_schedule = get_onboarding_email_schedule(user)

    if other_account_count == 0:
        onboarding_zulip_topics_context = common_context(user)

        onboarding_zulip_topics_context.update(
            unsubscribe_link=unsubscribe_link,
            move_messages_link=realm_url + "/help/move-content-to-another-topic",
            rename_topics_link=realm_url + "/help/rename-a-topic",
            move_channels_link=realm_url + "/help/move-content-to-another-channel",
        )

        send_future_email(
            "zerver/emails/onboarding_zulip_topics",
            user.realm,
            to_user_ids=[user.id],
            from_name=from_name,
            from_address=from_address,
            context=onboarding_zulip_topics_context,
            delay=onboarding_email_schedule["onboarding_zulip_topics"],
        )

    # We only send the onboarding_zulip_guide email for a subset of Realm.ORG_TYPES
    onboarding_zulip_guide_url, organization_type_reference = get_org_type_zulip_guide(user.realm)

    # Only send the "/for/communities/" guide if the user is a realm admin.
    # TODO: Remove this condition and related tests when the guide is updated;
    # see https://github.com/zulip/zulip/issues/24822.
    if (
        onboarding_zulip_guide_url == Realm.ORG_TYPES["community"]["onboarding_zulip_guide_url"]
        and not user.is_realm_admin
    ):
        onboarding_zulip_guide_url = None

    if onboarding_zulip_guide_url is not None:
        onboarding_zulip_guide_context = common_context(user)
        onboarding_zulip_guide_context.update(
            # We use the same unsubscribe link in both onboarding_zulip_topics
            # and onboarding_zulip_guide as these links do not expire.
            unsubscribe_link=unsubscribe_link,
            organization_type=organization_type_reference,
            zulip_guide_link=onboarding_zulip_guide_url,
        )

        send_future_email(
            "zerver/emails/onboarding_zulip_guide",
            user.realm,
            to_user_ids=[user.id],
            from_name=from_name,
            from_address=from_address,
            context=onboarding_zulip_guide_context,
            delay=onboarding_email_schedule["onboarding_zulip_guide"],
        )

    # We only send the onboarding_team_to_zulip email to the user who created
    # the organization.
    if realm_creation:
        onboarding_team_to_zulip_context = common_context(user)
        onboarding_team_to_zulip_context.update(
            unsubscribe_link=unsubscribe_link,
            get_organization_started=realm_url + "/help/moving-to-zulip",
            invite_users=realm_url + "/help/invite-users-to-join",
            trying_out_zulip=realm_url + "/help/trying-out-zulip",
            why_zulip="https://zulip.com/why-zulip/",
        )

        send_future_email(
            "zerver/emails/onboarding_team_to_zulip",
            user.realm,
            to_user_ids=[user.id],
            from_name=from_name,
            from_address=from_address,
            context=onboarding_team_to_zulip_context,
            delay=onboarding_email_schedule["onboarding_team_to_zulip"],
        )


def convert_html_to_markdown(html: str) -> str:
    # html2text is GPL licensed, so run it as a subprocess.
    markdown = subprocess.check_output(
        [os.path.join(sys.prefix, "bin", "html2text")], input=html, text=True
    ).strip()

    # We want images to get linked and inline previewed, but html2text will turn
    # them into links of the form `![](http://foo.com/image.png)`, which is
    # ugly. Run a regex over the resulting description, turning links of the
    # form `![](http://foo.com/image.png?12345)` into
    # `[image.png](http://foo.com/image.png)`.
    return re.sub(r"!\[\]\((\S*)/(\S*)\?(\S*)\)", "[\\2](\\1/\\2)", markdown)