import time
from typing import Any, Callable, Collection, Dict, List
from unittest import mock

import orjson
from django.http import HttpRequest, HttpResponse

from zerver.lib.actions import do_change_subscription_property, do_mute_topic
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import HostRequestMock, mock_queue_publish
from zerver.lib.user_groups import create_user_group, remove_user_from_user_group
from zerver.models import Recipient, Stream, Subscription, UserProfile, get_stream
from zerver.tornado.event_queue import (
    ClientDescriptor,
    allocate_client_descriptor,
    get_client_descriptor,
    maybe_enqueue_notifications,
    missedmessage_hook,
    persistent_queue_filename,
    process_notification,
)
from zerver.tornado.views import cleanup_event_queue, get_events


class MissedMessageNotificationsTest(ZulipTestCase):
    """Tests the logic for when missed-message notifications
    should be triggered, based on user settings"""

    def test_maybe_enqueue_notifications(self) -> None:
        # We've already tested the "when to send notifications" logic as part of the
        # notification_data module.
        # This test is for verifying whether `maybe_enqueue_notifications` returns the
        # `already_notified` data correctly.
        params = self.get_maybe_enqueue_notifications_parameters(
            message_id=1, user_id=1, acting_user_id=2
        )
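
        # The default parameters from the helper don't enable any notifications,
        # so nothing should be enqueued here.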
        with mock_queue_publish(
            "zerver.tornado.event_queue.queue_json_publish"
        ) as mock_queue_json_publish:
            notified = maybe_enqueue_notifications(**params)
            mock_queue_json_publish.assert_not_called()
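
        # Enable PM email and push notifications on the notification data; both
        # the push and email notification queues should now be published to.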
        with mock_queue_publish(
            "zerver.tornado.event_queue.queue_json_publish"
        ) as mock_queue_json_publish:
            params["user_notifications_data"] = self.create_user_notifications_data_object(
                user_id=1, pm_push_notify=True, pm_email_notify=True
            )
            notified = maybe_enqueue_notifications(**params)
            self.assertTrue(mock_queue_json_publish.call_count, 2)

            queues_pushed = [entry[0][0] for entry in mock_queue_json_publish.call_args_list]
            self.assertIn("missedmessage_mobile_notifications", queues_pushed)
            self.assertIn("missedmessage_emails", queues_pushed)

            self.assertTrue(notified["email_notified"])
            self.assertTrue(notified["push_notified"])
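
        # For a user group mention, the mentioned_user_group_id should be passed
        # through to both the push and email notification events.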
        with mock_queue_publish(
            "zerver.tornado.event_queue.queue_json_publish"
        ) as mock_queue_json_publish:
            params = self.get_maybe_enqueue_notifications_parameters(
                message_id=1,
                acting_user_id=2,
                user_id=3,
                mention_push_notify=True,
                mention_email_notify=True,
                mentioned_user_group_id=33,
            )
            notified = maybe_enqueue_notifications(**params)
            self.assertTrue(mock_queue_json_publish.call_count, 2)

            push_notice = mock_queue_json_publish.call_args_list[0][0][1]
            self.assertEqual(push_notice["mentioned_user_group_id"], 33)

            email_notice = mock_queue_json_publish.call_args_list[1][0][1]
            self.assertEqual(email_notice["mentioned_user_group_id"], 33)

    def tornado_call(
        self,
        view_func: Callable[[HttpRequest, UserProfile], HttpResponse],
        user_profile: UserProfile,
        post_data: Dict[str, Any],
    ) -> HttpResponse:
        request = HostRequestMock(post_data, user_profile)
        return view_func(request, user_profile)

    def test_stream_watchers(self) -> None:
        """
        We used to have a bug with stream_watchers, where we set their flags to
        None.
        """
        cordelia = self.example_user("cordelia")
        hamlet = self.example_user("hamlet")
        realm = hamlet.realm
        stream_name = "Denmark"

        self.unsubscribe(hamlet, stream_name)
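
        # Register an event queue for Hamlet, who watches all public streams
        # even though he is no longer subscribed to this one.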
        queue_data = dict(
            all_public_streams=True,
            apply_markdown=True,
            client_gravatar=True,
            client_type_name="home grown API program",
            event_types=["message"],
            last_connection_time=time.time(),
            queue_timeout=0,
            realm_id=realm.id,
            user_profile_id=hamlet.id,
        )
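
        # This test creates the queue directly with allocate_client_descriptor,
        # rather than going through the get_events view like the test below.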
        client = allocate_client_descriptor(queue_data)

        self.send_stream_message(cordelia, stream_name)

        self.assert_length(client.event_queue.contents(), 1)

        # This next line of code should silently succeed and basically do
        # nothing under the covers. This test is here to prevent a bug
        # from re-appearing.
        missedmessage_hook(
            user_profile_id=hamlet.id,
            client=client,
            last_for_client=True,
        )

    def test_end_to_end_missedmessage_hook(self) -> None:
        """Tests what arguments missedmessage_hook passes into maybe_enqueue_notifications.
        Combined with the previous test, this ensures that the missedmessage_hook is correct"""
        user_profile = self.example_user("hamlet")
        cordelia = self.example_user("cordelia")

        user_profile.enable_online_push_notifications = False
        user_profile.save()

        iago = self.example_user("iago")

        # Fetch the Denmark stream for testing
        stream = get_stream("Denmark", user_profile.realm)
        sub = Subscription.objects.get(
            user_profile=user_profile,
            recipient__type=Recipient.STREAM,
            recipient__type_id=stream.id,
        )

        self.login_user(user_profile)
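
        # Helpers for the scenarios below: tweak stream subscription settings,
        # register and clean up event queues, and compare the keyword arguments
        # captured from the mocked maybe_enqueue_notifications against the
        # expected parameters.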
        def change_subscription_properties(
            user_profile: UserProfile,
            stream: Stream,
            sub: Subscription,
            properties: Dict[str, bool],
        ) -> None:
            for property_name, value in properties.items():
                do_change_subscription_property(
                    user_profile, sub, stream, property_name, value, acting_user=None
                )

        def allocate_event_queue() -> ClientDescriptor:
            result = self.tornado_call(
                get_events,
                user_profile,
                {
                    "apply_markdown": orjson.dumps(True).decode(),
                    "client_gravatar": orjson.dumps(True).decode(),
                    "event_types": orjson.dumps(["message"]).decode(),
                    "user_client": "website",
                    "dont_block": orjson.dumps(True).decode(),
                },
            )
            self.assert_json_success(result)
            queue_id = orjson.loads(result.content)["queue_id"]
            return get_client_descriptor(queue_id)

        def destroy_event_queue(queue_id: str) -> None:
            result = self.tornado_call(cleanup_event_queue, user_profile, {"queue_id": queue_id})
            self.assert_json_success(result)

        def assert_maybe_enqueue_notifications_call_args(
            args_dict: Collection[Any],
            message_id: int,
            **kwargs: Any,
        ) -> None:
            expected_args_dict = self.get_maybe_enqueue_notifications_parameters(
                user_id=user_profile.id,
                acting_user_id=iago.id,
                message_id=message_id,
                **kwargs,
            )
            self.assertEqual(args_dict, expected_args_dict)
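
        # First, test the hook with a simple stream message; the captured call
        # should just carry the default parameters, with nothing already notified.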
        client_descriptor = allocate_event_queue()
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            # To test the missed_message hook, we first need to send a message
            msg_id = self.send_stream_message(iago, "Denmark")

            # Verify that nothing happens if you call it as not the
            # "last client descriptor", in which case the function
            # short-circuits, since the `missedmessage_hook` handler
            # for garbage-collection is only for the user's last queue.
            missedmessage_hook(user_profile.id, client_descriptor, False)
            mock_enqueue.assert_not_called()

            # Now verify that we called the appropriate enqueue function
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

            assert_maybe_enqueue_notifications_call_args(
                args_dict=args_dict,
                message_id=msg_id,
                already_notified={"email_notified": False, "push_notified": False},
            )
        destroy_event_queue(client_descriptor.event_queue.id)

        # Test the hook with a private message; this should trigger notifications
        client_descriptor = allocate_event_queue()
        self.assertTrue(client_descriptor.event_queue.empty())
        msg_id = self.send_personal_message(iago, user_profile)
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

            assert_maybe_enqueue_notifications_call_args(
                args_dict=args_dict,
                message_id=msg_id,
                pm_email_notify=True,
                pm_push_notify=True,
                already_notified={"email_notified": True, "push_notified": True},
            )
        destroy_event_queue(client_descriptor.event_queue.id)

        # If `enable_offline_email_notifications` is disabled, email notifications shouldn't
        # be sent even for PMs
        user_profile.enable_offline_email_notifications = False
        user_profile.save()
        client_descriptor = allocate_event_queue()
        self.assertTrue(client_descriptor.event_queue.empty())
        msg_id = self.send_personal_message(iago, user_profile)
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

            assert_maybe_enqueue_notifications_call_args(
                args_dict=args_dict,
                message_id=msg_id,
                pm_email_notify=False,
                pm_push_notify=True,
                already_notified={"email_notified": False, "push_notified": True},
            )
        destroy_event_queue(client_descriptor.event_queue.id)
        user_profile.enable_offline_email_notifications = True
        user_profile.save()

        # Test the hook with a mention; this should trigger notifications
        client_descriptor = allocate_event_queue()
        self.assertTrue(client_descriptor.event_queue.empty())
        msg_id = self.send_stream_message(
            self.example_user("iago"), "Denmark", content="@**King Hamlet** what's up?"
        )
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

            assert_maybe_enqueue_notifications_call_args(
                args_dict=args_dict,
                message_id=msg_id,
                mention_push_notify=True,
                mention_email_notify=True,
                already_notified={"email_notified": True, "push_notified": True},
            )
        destroy_event_queue(client_descriptor.event_queue.id)

        # If `enable_offline_push_notifications` is disabled, push notifications shouldn't
        # be sent even for mentions
        user_profile.enable_offline_push_notifications = False
        user_profile.save()
        client_descriptor = allocate_event_queue()
        self.assertTrue(client_descriptor.event_queue.empty())
        msg_id = self.send_personal_message(iago, user_profile)
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

            assert_maybe_enqueue_notifications_call_args(
                args_dict=args_dict,
                message_id=msg_id,
                pm_email_notify=True,
                pm_push_notify=False,
                already_notified={"email_notified": True, "push_notified": False},
            )
        destroy_event_queue(client_descriptor.event_queue.id)
        user_profile.enable_offline_push_notifications = True
        user_profile.save()

        # Test the hook with a wildcard mention; this should trigger notifications
        client_descriptor = allocate_event_queue()
        self.assertTrue(client_descriptor.event_queue.empty())
        msg_id = self.send_stream_message(iago, "Denmark", content="@**all** what's up?")
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

            assert_maybe_enqueue_notifications_call_args(
                args_dict=args_dict,
                message_id=msg_id,
                wildcard_mention_email_notify=True,
                wildcard_mention_push_notify=True,
                already_notified={"email_notified": True, "push_notified": True},
            )
        destroy_event_queue(client_descriptor.event_queue.id)

        # Wildcard mentions in muted streams don't notify.
        change_subscription_properties(user_profile, stream, sub, {"is_muted": True})
        client_descriptor = allocate_event_queue()
        self.assertTrue(client_descriptor.event_queue.empty())
        msg_id = self.send_stream_message(iago, "Denmark", content="@**all** what's up?")
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

            assert_maybe_enqueue_notifications_call_args(
                args_dict=args_dict,
                wildcard_mention_email_notify=False,
                wildcard_mention_push_notify=False,
                message_id=msg_id,
                already_notified={"email_notified": False, "push_notified": False},
            )
        destroy_event_queue(client_descriptor.event_queue.id)
        change_subscription_properties(user_profile, stream, sub, {"is_muted": False})

        # With wildcard_mentions_notify=False, we treat the user as not mentioned.
        user_profile.wildcard_mentions_notify = False
        user_profile.save()
        client_descriptor = allocate_event_queue()
        self.assertTrue(client_descriptor.event_queue.empty())
        msg_id = self.send_stream_message(iago, "Denmark", content="@**all** what's up?")
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

            assert_maybe_enqueue_notifications_call_args(
                args_dict=args_dict,
                message_id=msg_id,
                wildcard_mention_email_notify=False,
                wildcard_mention_push_notify=False,
                already_notified={"email_notified": False, "push_notified": False},
            )
        destroy_event_queue(client_descriptor.event_queue.id)
        user_profile.wildcard_mentions_notify = True
        user_profile.save()

        # If wildcard_mentions_notify=True for a stream and False for a user, we treat the user
        # as mentioned for that stream.
        user_profile.wildcard_mentions_notify = False
        sub.wildcard_mentions_notify = True
        user_profile.save()
        sub.save()
        client_descriptor = allocate_event_queue()
        self.assertTrue(client_descriptor.event_queue.empty())
        msg_id = self.send_stream_message(iago, "Denmark", content="@**all** what's up?")
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

            assert_maybe_enqueue_notifications_call_args(
                args_dict=args_dict,
                message_id=msg_id,
                wildcard_mention_email_notify=True,
                wildcard_mention_push_notify=True,
                already_notified={"email_notified": True, "push_notified": True},
            )
        destroy_event_queue(client_descriptor.event_queue.id)
        user_profile.wildcard_mentions_notify = True
        sub.wildcard_mentions_notify = None
        user_profile.save()
        sub.save()

        # If notifications for personal mentions themselves have been turned off,
        # even turning on `wildcard_mentions_notify` should not send notifications
        user_profile.enable_offline_email_notifications = False
        user_profile.wildcard_mentions_notify = True
        user_profile.save()
        client_descriptor = allocate_event_queue()
        self.assertTrue(client_descriptor.event_queue.empty())
        msg_id = self.send_stream_message(iago, "Denmark", content="@**all** what's up?")
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

            # We've turned off email notifications for personal mentions, but push notifications
            # for personal mentions are still on.
            # Because `wildcard_mentions_notify` is True, a message with `@all` should follow the
            # personal mention settings
            assert_maybe_enqueue_notifications_call_args(
                args_dict=args_dict,
                message_id=msg_id,
                wildcard_mention_email_notify=False,
                wildcard_mention_push_notify=True,
                already_notified={"email_notified": False, "push_notified": True},
            )
        destroy_event_queue(client_descriptor.event_queue.id)
        user_profile.enable_offline_email_notifications = True
        user_profile.wildcard_mentions_notify = True
        user_profile.save()
|
|
|
|
|
2021-07-01 17:40:16 +02:00
|
|
|
# Test with a user group mention
|
|
|
|
hamlet_and_cordelia = create_user_group(
|
|
|
|
"hamlet_and_cordelia", [user_profile, cordelia], cordelia.realm
|
|
|
|
)
|
|
|
|
client_descriptor = allocate_event_queue()
|
|
|
|
self.assertTrue(client_descriptor.event_queue.empty())
|
|
|
|
msg_id = self.send_stream_message(
|
|
|
|
iago, "Denmark", content="@*hamlet_and_cordelia* what's up?"
|
|
|
|
)
|
|
|
|
with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
|
|
|
|
missedmessage_hook(user_profile.id, client_descriptor, True)
|
|
|
|
mock_enqueue.assert_called_once()
|
|
|
|
args_dict = mock_enqueue.call_args_list[0][1]
|
|
|
|
|
|
|
|
assert_maybe_enqueue_notifications_call_args(
|
|
|
|
args_dict=args_dict,
|
|
|
|
message_id=msg_id,
|
            mention_push_notify=True,
            mention_email_notify=True,
            mentioned_user_group_id=hamlet_and_cordelia.id,
            already_notified={"email_notified": True, "push_notified": True},
        )
        destroy_event_queue(client_descriptor.event_queue.id)
        remove_user_from_user_group(user_profile, hamlet_and_cordelia)
        remove_user_from_user_group(cordelia, hamlet_and_cordelia)
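        # Reading the scenario above: a mention of a user group that the user
        # belongs to is treated like a personal mention for notification purposes,
        # so both mention_email_notify and mention_push_notify are True and both
        # an email and a push notification are recorded as already enqueued.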

        # Test the hook with a stream message with stream_push_notify
        change_subscription_properties(user_profile, stream, sub, {"push_notifications": True})
        client_descriptor = allocate_event_queue()
        self.assertTrue(client_descriptor.event_queue.empty())
        msg_id = self.send_stream_message(iago, "Denmark", content="what's up everyone?")
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

        assert_maybe_enqueue_notifications_call_args(
            args_dict=args_dict,
            message_id=msg_id,
            stream_push_notify=True,
            stream_email_notify=False,
            already_notified={"email_notified": False, "push_notified": True},
        )
        destroy_event_queue(client_descriptor.event_queue.id)

        # Test the hook with a stream message with stream_email_notify
        client_descriptor = allocate_event_queue()
        change_subscription_properties(
            user_profile, stream, sub, {"push_notifications": False, "email_notifications": True}
        )
        self.assertTrue(client_descriptor.event_queue.empty())
        msg_id = self.send_stream_message(iago, "Denmark", content="what's up everyone?")
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

        assert_maybe_enqueue_notifications_call_args(
            args_dict=args_dict,
            message_id=msg_id,
            stream_push_notify=False,
            stream_email_notify=True,
            already_notified={"email_notified": True, "push_notified": False},
        )
        destroy_event_queue(client_descriptor.event_queue.id)
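        # Taken together, the two stream scenarios above pin down how per-stream
        # subscription settings map to notifications: "push_notifications" alone
        # enqueues only a push notification, "email_notifications" alone enqueues
        # only an email, and already_notified mirrors exactly which queues were used.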

        # Test the hook with stream message with stream_push_notify on
        # a muted topic, which we should not push notify for
        client_descriptor = allocate_event_queue()
        change_subscription_properties(
            user_profile, stream, sub, {"push_notifications": True, "email_notifications": False}
        )

        self.assertTrue(client_descriptor.event_queue.empty())
        do_mute_topic(user_profile, stream, "mutingtest")
        msg_id = self.send_stream_message(
            iago,
            "Denmark",
            content="what's up everyone?",
            topic_name="mutingtest",
        )
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

        assert_maybe_enqueue_notifications_call_args(
            args_dict=args_dict,
            message_id=msg_id,
            already_notified={"email_notified": False, "push_notified": False},
        )
        destroy_event_queue(client_descriptor.event_queue.id)

        # Test the hook with stream message with stream_email_notify on
        # a muted stream, which we should not push notify for
        client_descriptor = allocate_event_queue()
        change_subscription_properties(
            user_profile, stream, sub, {"push_notifications": False, "email_notifications": True}
        )

        self.assertTrue(client_descriptor.event_queue.empty())
        change_subscription_properties(user_profile, stream, sub, {"is_muted": True})
        msg_id = self.send_stream_message(iago, "Denmark", content="what's up everyone?")
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

        assert_maybe_enqueue_notifications_call_args(
            args_dict=args_dict,
            message_id=msg_id,
            already_notified={"email_notified": False, "push_notified": False},
        )
        destroy_event_queue(client_descriptor.event_queue.id)

        # Clean up the state we just changed (not necessary unless we add more test code below)
        change_subscription_properties(
            user_profile, stream, sub, {"push_notifications": True, "is_muted": False}
        )
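        # The two muting scenarios above show that muting, whether of a single
        # topic or of the whole stream, wins over the subscription-level
        # notification settings: maybe_enqueue_notifications is still called, but
        # nothing is enqueued (already_notified is False for both email and push).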

        # Test the hook when the sender has been muted
        result = self.api_post(user_profile, f"/api/v1/users/me/muted_users/{iago.id}")
        self.assert_json_success(result)
        client_descriptor = allocate_event_queue()
        self.assertTrue(client_descriptor.event_queue.empty())
        msg_id = self.send_personal_message(iago, user_profile)
        with mock.patch("zerver.tornado.event_queue.maybe_enqueue_notifications") as mock_enqueue:
            missedmessage_hook(user_profile.id, client_descriptor, True)
            mock_enqueue.assert_called_once()
            args_dict = mock_enqueue.call_args_list[0][1]

        assert_maybe_enqueue_notifications_call_args(
            args_dict=args_dict,
            message_id=msg_id,
            sender_is_muted=True,
            pm_email_notify=True,
            pm_push_notify=True,
already_notified={"email_notified": False, "push_notified": False},
|
|
|
|
)
|
2021-06-14 14:30:45 +02:00
|
|
|
destroy_event_queue(client_descriptor.event_queue.id)
|
|
|
|
result = self.api_delete(user_profile, f"/api/v1/users/me/muted_users/{iago.id}")
|
|
|
|
self.assert_json_success(result)
|
|
|
|
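        # For a muted sender, the PM-level settings still report that email and
        # push notifications are wanted (pm_email_notify/pm_push_notify are True),
        # but sender_is_muted short-circuits the actual enqueueing, so
        # already_notified stays False for both channels.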


class FileReloadLogicTest(ZulipTestCase):
    def test_persistent_queue_filename(self) -> None:
        with self.settings(
            JSON_PERSISTENT_QUEUE_FILENAME_PATTERN="/home/zulip/tornado/event_queues%s.json"
        ):
            self.assertEqual(
                persistent_queue_filename(9800), "/home/zulip/tornado/event_queues.json"
            )
            self.assertEqual(
                persistent_queue_filename(9800, last=True),
                "/home/zulip/tornado/event_queues.json.last",
            )
        with self.settings(
            JSON_PERSISTENT_QUEUE_FILENAME_PATTERN="/home/zulip/tornado/event_queues%s.json",
            TORNADO_PROCESSES=4,
        ):
            self.assertEqual(
                persistent_queue_filename(9800), "/home/zulip/tornado/event_queues.9800.json"
            )
            self.assertEqual(
                persistent_queue_filename(9800, last=True),
                "/home/zulip/tornado/event_queues.9800.last.json",
            )
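        # A sketch of the naming scheme these assertions pin down (illustrative,
        # not the real implementation): the "%s" in the pattern expands to ""
        # for a single Tornado process and to ".<port>" when sharded, and the
        # last=True variant adds a "last" marker, e.g.
        #
        #     event_queues.json       -> event_queues.json.last        (single process)
        #     event_queues.9800.json  -> event_queues.9800.last.json   (sharded)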


class PruneInternalDataTest(ZulipTestCase):
    def test_prune_internal_data(self) -> None:
        user_profile = self.example_user("hamlet")
        queue_data = dict(
            all_public_streams=True,
            apply_markdown=True,
            client_gravatar=True,
            client_type_name="website",
            event_types=["message"],
            last_connection_time=time.time(),
            queue_timeout=600,
            realm_id=user_profile.realm.id,
            user_profile_id=user_profile.id,
        )
        client = allocate_client_descriptor(queue_data)
        self.assertTrue(client.event_queue.empty())

        self.send_stream_message(
            self.example_user("iago"), "Denmark", content="@**King Hamlet** what's up?"
        )
        self.send_stream_message(
            self.example_user("iago"), "Denmark", content="@**all** what's up?"
        )
        self.send_personal_message(self.example_user("iago"), user_profile)

        events = client.event_queue.contents()
        self.assert_length(events, 3)
        self.assertFalse("internal_data" in events[0])
        self.assertFalse("internal_data" in events[1])
        self.assertFalse("internal_data" in events[2])

        events = client.event_queue.contents(include_internal_data=True)
        self.assertTrue("internal_data" in events[0])
        self.assertTrue("internal_data" in events[1])
        self.assertTrue("internal_data" in events[2])
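        # In short: contents() strips the "internal_data" key from each event by
        # default, and only include_internal_data=True exposes it, presumably so
        # that this internal bookkeeping never reaches API clients.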


class EventQueueTest(ZulipTestCase):
    def get_client_descriptor(self) -> ClientDescriptor:
        hamlet = self.example_user("hamlet")
        realm = hamlet.realm
        queue_data = dict(
            all_public_streams=False,
            apply_markdown=False,
            client_gravatar=True,
            client_type_name="website",
            event_types=None,
            last_connection_time=time.time(),
            queue_timeout=0,
            realm_id=realm.id,
            user_profile_id=hamlet.id,
        )

        client = allocate_client_descriptor(queue_data)
        return client

    def verify_to_dict_end_to_end(self, client: ClientDescriptor) -> None:
        client_dict = client.to_dict()
        new_client = ClientDescriptor.from_dict(client_dict)
        self.assertEqual(client.to_dict(), new_client.to_dict())

        client_dict = client.to_dict()
        del client_dict["event_queue"]["newest_pruned_id"]
        new_client = ClientDescriptor.from_dict(client_dict)
        self.assertEqual(client_dict, new_client.to_dict())

    def test_one_event(self) -> None:
        client = self.get_client_descriptor()
        queue = client.event_queue
        in_dict = dict(
            type="arbitrary",
            x="foo",
            y=42,
            z=False,
            timestamp="1",
        )
        out_dict = dict(
            id=0,
            **in_dict,
        )
        queue.push(in_dict)
        self.assertFalse(queue.empty())
        self.verify_to_dict_end_to_end(client)
        self.assertEqual(queue.contents(), [out_dict])
        self.verify_to_dict_end_to_end(client)

    def test_event_collapsing(self) -> None:
        client = self.get_client_descriptor()
        queue = client.event_queue

        """
        The update_message_flags events are special, because
        they can be collapsed together. Given two umfe's, we:
        * use the latest timestamp
        * concatenate the messages
        """

        def umfe(timestamp: int, messages: List[int]) -> Dict[str, Any]:
            return dict(
                type="update_message_flags",
                operation="add",
                flag="read",
                all=False,
                timestamp=timestamp,
                messages=messages,
            )

        events = [
            umfe(timestamp=1, messages=[101]),
            umfe(timestamp=2, messages=[201, 202]),
            dict(type="unknown"),
            dict(type="restart", server_generation="1"),
            umfe(timestamp=3, messages=[301, 302, 303]),
            dict(type="restart", server_generation="2"),
            umfe(timestamp=4, messages=[401, 402, 403, 404]),
        ]

        for event in events:
            queue.push(event)

        self.verify_to_dict_end_to_end(client)

        self.assertEqual(
            queue.contents(),
            [
                dict(id=2, type="unknown"),
                dict(id=5, type="restart", server_generation="2"),
                dict(
                    id=6,
                    type="update_message_flags",
                    operation="add",
                    flag="read",
                    all=False,
                    timestamp=4,
                    messages=[101, 201, 202, 301, 302, 303, 401, 402, 403, 404],
                ),
            ],
        )
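        # Reading the expected contents above: the four update_message_flags
        # events collapsed into a single event (id=6) that keeps the latest
        # timestamp (4) and the concatenation of all message IDs, the two restart
        # events collapsed down to the most recent one (id=5), and the "unknown"
        # event passed through untouched; the gaps in the ids are the events that
        # were collapsed away.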
"""
|
2020-06-26 14:14:36 +02:00
|
|
|
Note that calling queue.contents() has the side
|
|
|
|
effect that we will no longer be able to collapse
|
|
|
|
the previous events, so the next event will just
|
|
|
|
get added to the queue, rather than collapsed.
|
2021-02-12 08:20:45 +01:00
|
|
|
"""
|
2020-06-26 14:14:36 +02:00
|
|
|
queue.push(
|
|
|
|
umfe(timestamp=5, messages=[501, 502, 503, 504, 505]),
|
|
|
|
)
|
2021-02-12 08:19:30 +01:00
|
|
|
self.assertEqual(
|
|
|
|
queue.contents(),
|
|
|
|
[
|
2021-02-12 08:20:45 +01:00
|
|
|
dict(id=2, type="unknown"),
|
|
|
|
dict(id=5, type="restart", server_generation="2"),
|
2021-02-12 08:19:30 +01:00
|
|
|
dict(
|
|
|
|
id=6,
|
2021-02-12 08:20:45 +01:00
|
|
|
type="update_message_flags",
|
|
|
|
operation="add",
|
|
|
|
flag="read",
|
2021-02-12 08:19:30 +01:00
|
|
|
all=False,
|
|
|
|
timestamp=4,
|
|
|
|
messages=[101, 201, 202, 301, 302, 303, 401, 402, 403, 404],
|
|
|
|
),
|
|
|
|
dict(
|
|
|
|
id=7,
|
2021-02-12 08:20:45 +01:00
|
|
|
type="update_message_flags",
|
|
|
|
operation="add",
|
|
|
|
flag="read",
|
2021-02-12 08:19:30 +01:00
|
|
|
all=False,
|
|
|
|
timestamp=5,
|
|
|
|
messages=[501, 502, 503, 504, 505],
|
|
|
|
),
|
|
|
|
],
|
|
|
|
)
|
2019-08-02 21:50:27 +02:00
|
|
|
|
|
|
|

    def test_flag_add_collapsing(self) -> None:
        client = self.get_client_descriptor()
        queue = client.event_queue
        queue.push(
            {
                "type": "update_message_flags",
                "flag": "read",
                "operation": "add",
                "all": False,
                "messages": [1, 2, 3, 4],
                "timestamp": "1",
            }
        )
        self.verify_to_dict_end_to_end(client)
        queue.push(
            {
                "type": "update_message_flags",
                "flag": "read",
                "all": False,
                "operation": "add",
                "messages": [5, 6],
                "timestamp": "1",
            }
        )
        self.verify_to_dict_end_to_end(client)
        self.assertEqual(
            queue.contents(),
            [
                {
                    "id": 1,
                    "type": "update_message_flags",
                    "all": False,
                    "flag": "read",
                    "operation": "add",
                    "messages": [1, 2, 3, 4, 5, 6],
                    "timestamp": "1",
                }
            ],
        )
        self.verify_to_dict_end_to_end(client)

    def test_flag_remove_collapsing(self) -> None:
        client = self.get_client_descriptor()
        queue = client.event_queue
        queue.push(
            {
                "type": "update_message_flags",
                "flag": "collapsed",
                "operation": "remove",
                "all": False,
                "messages": [1, 2, 3, 4],
                "timestamp": "1",
            }
        )
        self.verify_to_dict_end_to_end(client)
        queue.push(
            {
                "type": "update_message_flags",
                "flag": "collapsed",
                "all": False,
                "operation": "remove",
                "messages": [5, 6],
                "timestamp": "1",
            }
        )
        self.verify_to_dict_end_to_end(client)
        self.assertEqual(
            queue.contents(),
            [
                {
                    "id": 1,
                    "type": "update_message_flags",
                    "all": False,
                    "flag": "collapsed",
                    "operation": "remove",
                    "messages": [1, 2, 3, 4, 5, 6],
                    "timestamp": "1",
                }
            ],
        )
        self.verify_to_dict_end_to_end(client)

    def test_collapse_event(self) -> None:
        """
        This mostly focuses on the internals of
        how we store "virtual_events" that we
        can collapse if subsequent events are
        of the same form. See the code in
        EventQueue.push for more context.
        """
        client = self.get_client_descriptor()
        queue = client.event_queue
        queue.push({"type": "restart", "server_generation": 1, "timestamp": "1"})
        # Verify the server_generation event is stored as a virtual event
        self.assertEqual(
            queue.virtual_events,
            {"restart": {"id": 0, "type": "restart", "server_generation": 1, "timestamp": "1"}},
        )
        # And we can reconstruct newest_pruned_id etc.
        self.verify_to_dict_end_to_end(client)

        queue.push({"type": "unknown", "timestamp": "1"})
        self.assertEqual(list(queue.queue), [{"id": 1, "type": "unknown", "timestamp": "1"}])
        self.assertEqual(
            queue.virtual_events,
            {"restart": {"id": 0, "type": "restart", "server_generation": 1, "timestamp": "1"}},
        )
        # And we can still reconstruct newest_pruned_id etc. correctly
        self.verify_to_dict_end_to_end(client)

        # Verify virtual events are converted to real events by .contents()
        self.assertEqual(
            queue.contents(),
            [
                {"id": 0, "type": "restart", "server_generation": 1, "timestamp": "1"},
                {"id": 1, "type": "unknown", "timestamp": "1"},
            ],
        )

        # And now verify to_dict after pruning
        queue.prune(0)
        self.verify_to_dict_end_to_end(client)

        queue.prune(1)
        self.verify_to_dict_end_to_end(client)
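        # The takeaway from this test: events that can be collapsed (here,
        # "restart") are parked in queue.virtual_events keyed by type instead of
        # being appended to queue.queue, so a later event of the same form can
        # overwrite them in place; contents() then materializes them back into
        # ordinary events with their original ids.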


class SchemaMigrationsTests(ZulipTestCase):
    def test_reformat_legacy_send_message_event(self) -> None:
        hamlet = self.example_user("hamlet")
        cordelia = self.example_user("cordelia")
        othello = self.example_user("othello")
        old_format_event = dict(
            type="message",
            message=1,
            message_dict={},
            presence_idle_user_ids=[hamlet.id, othello.id],
        )
        old_format_users = [
            dict(
                id=hamlet.id,
                flags=["mentioned"],
                mentioned=True,
                online_push_enabled=True,
                stream_push_notify=False,
                stream_email_notify=True,
                wildcard_mention_notify=False,
                sender_is_muted=False,
            ),
            dict(
                id=cordelia.id,
                flags=["wildcard_mentioned"],
                mentioned=False,
                online_push_enabled=True,
                stream_push_notify=True,
                stream_email_notify=False,
                wildcard_mention_notify=True,
                sender_is_muted=False,
            ),
        ]
        notice = dict(event=old_format_event, users=old_format_users)

        expected_current_format_users = [
            dict(
                id=hamlet.id,
                flags=["mentioned"],
            ),
            dict(
                id=cordelia.id,
                flags=["wildcard_mentioned"],
            ),
        ]

        expected_current_format_event = dict(
            type="message",
            message=1,
            message_dict={},
            presence_idle_user_ids=[hamlet.id, othello.id],
            online_push_user_ids=[hamlet.id, cordelia.id],
            stream_push_user_ids=[cordelia.id],
            stream_email_user_ids=[hamlet.id],
            wildcard_mention_user_ids=[cordelia.id],
            muted_sender_user_ids=[],
        )
        with mock.patch("zerver.tornado.event_queue.process_message_event") as m:
            process_notification(notice)
            m.assert_called_once()
            self.assertDictEqual(m.call_args[0][0], expected_current_format_event)
            self.assertEqual(m.call_args[0][1], expected_current_format_users)
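        # The conversion asserted above: the legacy notice carried per-user
        # notification booleans (online_push_enabled, stream_push_notify,
        # stream_email_notify, wildcard_mention_notify, sender_is_muted) on each
        # user dict, while the current format hoists them onto the event itself
        # as user-id lists (online_push_user_ids, stream_push_user_ids,
        # stream_email_user_ids, wildcard_mention_user_ids, muted_sender_user_ids)
        # and keeps only id/flags per user.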