# zulip/zerver/tests/test_event_system.py
import time
from typing import Any, Callable, Dict, List
from unittest import mock
from urllib.parse import urlsplit
import orjson
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.test import override_settings
from django.utils.timezone import now as timezone_now
from typing_extensions import override
from zerver.actions.custom_profile_fields import try_update_realm_custom_profile_field
from zerver.actions.message_send import check_send_message
from zerver.actions.presence import do_update_user_presence
from zerver.actions.user_settings import do_change_user_setting
from zerver.actions.users import do_change_user_role
from zerver.lib.event_schema import check_web_reload_client_event
from zerver.lib.events import fetch_initial_state_data
from zerver.lib.exceptions import AccessDeniedError
from zerver.lib.request import RequestVariableMissingError
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
HostRequestMock,
dummy_handler,
reset_email_visibility_to_everyone_in_zulip_realm,
stub_event_queue_user_events,
)
from zerver.lib.users import get_api_key, get_users_for_api
from zerver.models import CustomProfileField, UserMessage, UserPresence, UserProfile
from zerver.models.clients import get_client
from zerver.models.realms import get_realm, get_realm_with_settings
from zerver.models.streams import get_stream
from zerver.models.users import get_system_bot
from zerver.tornado.event_queue import (
allocate_client_descriptor,
clear_client_event_queues_for_testing,
get_client_info_for_message_event,
mark_clients_to_reload,
process_message_event,
send_web_reload_client_events,
)
from zerver.tornado.exceptions import BadEventQueueIdError
from zerver.tornado.views import get_events
from zerver.views.events_register import _default_all_public_streams, _default_narrow
class EventsEndpointTest(ZulipTestCase):
    """Minimal-coverage tests for the /register (events_register) endpoint
    and the internal Tornado notification endpoints."""

    def test_events_register_without_user_agent(self) -> None:
        """Registering without a User-Agent header must still succeed."""
        result = self.client_post("/json/register", skip_user_agent=True)
        self.assert_json_success(result)

    def test_narrows(self) -> None:
        """The `narrow` parameter is decoded and forwarded unchanged to
        request_event_queue."""
        user = self.example_user("hamlet")
        with mock.patch("zerver.lib.events.request_event_queue", return_value=None) as m:
            munge = lambda obj: orjson.dumps(obj).decode()
            narrow = [["stream", "devel"], ["is", "mentioned"]]
            payload = dict(narrow=munge(narrow))
            result = self.api_post(user, "/api/v1/register", payload)
        # We want the test to abort before actually fetching data.
        self.assert_json_error(result, "Could not allocate event queue")
        self.assertEqual(m.call_args.kwargs["narrow"], [["stream", "devel"], ["is", "mentioned"]])

    def test_events_register_endpoint(self) -> None:
        # This test is intended to get minimal coverage on the
        # events_register code paths
        user = self.example_user("hamlet")
        with mock.patch("zerver.views.events_register.do_events_register", return_value={}):
            result = self.api_post(user, "/api/v1/register")
        self.assert_json_success(result)

        # Queue allocation failure surfaces as a JSON error.
        with mock.patch("zerver.lib.events.request_event_queue", return_value=None):
            result = self.api_post(user, "/api/v1/register")
        self.assert_json_error(result, "Could not allocate event queue")

        return_event_queue = "15:11"
        return_user_events: List[Dict[str, Any]] = []

        # We choose realm_emoji somewhat randomly--we want
        # a "boring" event type for the purpose of this test.
        event_type = "realm_emoji"
        empty_realm_emoji_dict: Dict[str, Any] = {}
        test_event = dict(id=6, type=event_type, realm_emoji=empty_realm_emoji_dict)

        # Test that call is made to deal with a returning soft deactivated user.
        with mock.patch("zerver.lib.events.reactivate_user_if_soft_deactivated") as fa:
            with stub_event_queue_user_events(return_event_queue, return_user_events):
                result = self.api_post(
                    user, "/api/v1/register", dict(event_types=orjson.dumps([event_type]).decode())
                )
                self.assertEqual(fa.call_count, 1)

        # With no queued events, last_event_id should be -1.
        with stub_event_queue_user_events(return_event_queue, return_user_events):
            result = self.api_post(
                user, "/api/v1/register", dict(event_types=orjson.dumps([event_type]).decode())
            )
        result_dict = self.assert_json_success(result)
        self.assertEqual(result_dict["last_event_id"], -1)
        self.assertEqual(result_dict["queue_id"], "15:11")

        # Now start simulating returning actual data
        return_event_queue = "15:12"
        return_user_events = [test_event]
        with stub_event_queue_user_events(return_event_queue, return_user_events):
            result = self.api_post(
                user, "/api/v1/register", dict(event_types=orjson.dumps([event_type]).decode())
            )
        result_dict = self.assert_json_success(result)
        self.assertEqual(result_dict["last_event_id"], 6)
        self.assertEqual(result_dict["queue_id"], "15:12")
        # sanity check the data relevant to our event
        self.assertEqual(result_dict["realm_emoji"], {})

        # Now test with `fetch_event_types` not matching the event
        return_event_queue = "15:13"
        with stub_event_queue_user_events(return_event_queue, return_user_events):
            result = self.api_post(
                user,
                "/api/v1/register",
                dict(
                    event_types=orjson.dumps([event_type]).decode(),
                    fetch_event_types=orjson.dumps(["message"]).decode(),
                ),
            )
        result_dict = self.assert_json_success(result)
        self.assertEqual(result_dict["last_event_id"], 6)
        # Check that the message event types data is in there
        self.assertIn("max_message_id", result_dict)
        # Check that our original event type is not there.
        self.assertNotIn(event_type, result_dict)
        self.assertEqual(result_dict["queue_id"], "15:13")

        # Now test with `fetch_event_types` matching the event
        with stub_event_queue_user_events(return_event_queue, return_user_events):
            result = self.api_post(
                user,
                "/api/v1/register",
                dict(
                    fetch_event_types=orjson.dumps([event_type]).decode(),
                    event_types=orjson.dumps(["message"]).decode(),
                ),
            )
        result_dict = self.assert_json_success(result)
        self.assertEqual(result_dict["last_event_id"], 6)
        # Check that we didn't fetch the messages data
        self.assertNotIn("max_message_id", result_dict)
        # Check that the realm_emoji data is in there.
        self.assertIn("realm_emoji", result_dict)
        self.assertEqual(result_dict["realm_emoji"], {})
        self.assertEqual(result_dict["queue_id"], "15:13")

    def test_events_register_spectators(self) -> None:
        # Verify that POST /register works for spectators, but not for
        # normal users.
        with self.settings(WEB_PUBLIC_STREAMS_ENABLED=False):
            # Spectator access is rejected when web-public streams are disabled.
            result = self.client_post("/json/register")
            self.assert_json_error(
                result,
                "Not logged in: API authentication or user session required",
                status_code=401,
            )

        result = self.client_post("/json/register")
        result_dict = self.assert_json_success(result)
        # Spectators get no event queue, only the realm state.
        self.assertEqual(result_dict["queue_id"], None)
        self.assertEqual(result_dict["realm_url"], "http://zulip.testserver")
        self.assertEqual(result_dict["realm_uri"], "http://zulip.testserver")

        result = self.client_post("/json/register")
        self.assertEqual(result.status_code, 200)

        result = self.client_post("/json/register", dict(client_gravatar="false"))
        self.assertEqual(result.status_code, 200)

        # Client capabilities that require an authenticated user are
        # rejected for anonymous (spectator) requests.
        result = self.client_post("/json/register", dict(client_gravatar="true"))
        self.assert_json_error(
            result,
            "Invalid 'client_gravatar' parameter for anonymous request",
            status_code=400,
        )

        result = self.client_post("/json/register", dict(include_subscribers="true"))
        self.assert_json_error(
            result,
            "Invalid 'include_subscribers' parameter for anonymous request",
            status_code=400,
        )

    def test_events_register_endpoint_all_public_streams_access(self) -> None:
        """Members may register with all_public_streams=true; guests may not."""
        guest_user = self.example_user("polonius")
        normal_user = self.example_user("hamlet")
        self.assertEqual(guest_user.role, UserProfile.ROLE_GUEST)
        self.assertEqual(normal_user.role, UserProfile.ROLE_MEMBER)

        with mock.patch("zerver.views.events_register.do_events_register", return_value={}):
            result = self.api_post(normal_user, "/api/v1/register", dict(all_public_streams="true"))
            self.assert_json_success(result)

        with mock.patch("zerver.views.events_register.do_events_register", return_value={}):
            result = self.api_post(guest_user, "/api/v1/register", dict(all_public_streams="true"))
            self.assert_json_error(result, "User not authorized for this query")

    def test_events_get_events_endpoint_guest_cant_use_all_public_streams_param(self) -> None:
        """
        This test is meant to execute the very beginning of the codepath
        to ensure guest users are immediately disallowed to use the
        all_public_streams param. Deeper testing is hard (and not necessary for this case)
        due to the codepath expecting AsyncDjangoHandler to be attached to the request,
        which doesn't happen in our test setup.
        """
        guest_user = self.example_user("polonius")
        self.assertEqual(guest_user.role, UserProfile.ROLE_GUEST)

        result = self.api_get(guest_user, "/api/v1/events", dict(all_public_streams="true"))
        self.assert_json_error(result, "User not authorized for this query")

    def test_tornado_endpoint(self) -> None:
        # This test is mostly intended to get minimal coverage on the
        # /api/internal/notify_tornado endpoint (only used in
        # puppeteer tests), so we can have 100% URL coverage, but it
        # does exercise a little bit of the codepath.
        post_data = dict(
            data=orjson.dumps(
                dict(
                    event=dict(
                        type="other",
                    ),
                    users=[self.example_user("hamlet").id],
                ),
            ).decode(),
        )

        # Missing shared secret => 400 before any processing.
        req = HostRequestMock(post_data)
        req.META["REMOTE_ADDR"] = "127.0.0.1"
        with self.assertRaises(RequestVariableMissingError) as context:
            result = self.client_post_request("/api/internal/notify_tornado", req)
        self.assertEqual(str(context.exception), "Missing 'secret' argument")
        self.assertEqual(context.exception.http_status_code, 400)

        # Wrong shared secret => 403.
        post_data["secret"] = "random"
        req = HostRequestMock(post_data, user_profile=None)
        req.META["REMOTE_ADDR"] = "127.0.0.1"
        with self.assertRaises(AccessDeniedError) as access_denied_error:
            result = self.client_post_request("/api/internal/notify_tornado", req)
        self.assertEqual(str(access_denied_error.exception), "Access denied")
        self.assertEqual(access_denied_error.exception.http_status_code, 403)

        # Correct secret: the notification is accepted.
        post_data["secret"] = settings.SHARED_SECRET
        req = HostRequestMock(post_data, tornado_handler=dummy_handler)
        req.META["REMOTE_ADDR"] = "127.0.0.1"
        result = self.client_post_request("/api/internal/notify_tornado", req)
        self.assert_json_success(result)

        # Correct secret but missing 'data' payload => 400.
        post_data = dict(secret=settings.SHARED_SECRET)
        req = HostRequestMock(post_data, tornado_handler=dummy_handler)
        req.META["REMOTE_ADDR"] = "127.0.0.1"
        with self.assertRaises(RequestVariableMissingError) as context:
            result = self.client_post_request("/api/internal/notify_tornado", req)
        self.assertEqual(str(context.exception), "Missing 'data' argument")
        self.assertEqual(context.exception.http_status_code, 400)

    def test_web_reload_clients(self) -> None:
        # Minimal testing of the /api/internal/web_reload_clients endpoint
        post_data = {
            "client_count": "1",
            "immediate": orjson.dumps(False).decode(),
            "secret": settings.SHARED_SECRET,
        }
        req = HostRequestMock(post_data, tornado_handler=dummy_handler)
        req.META["REMOTE_ADDR"] = "127.0.0.1"
        result = self.client_post_request("/api/internal/web_reload_clients", req)
        self.assert_json_success(result)
        # No client queues exist in this test, so no reload events go out.
        self.assertEqual(orjson.loads(result.content)["sent_events"], 0)
class GetEventsTest(ZulipTestCase):
    """Tests for the GET /events long-polling Tornado view, invoked
    directly (synchronously) via tornado_call rather than over HTTP."""

    def tornado_call(
        self,
        view_func: Callable[[HttpRequest, UserProfile], HttpResponse],
        user_profile: UserProfile,
        post_data: Dict[str, Any],
    ) -> HttpResponse:
        """Invoke a Tornado view with a mocked request and dummy handler."""
        request = HostRequestMock(post_data, user_profile, tornado_handler=dummy_handler)
        return view_func(request, user_profile)

    def test_get_events(self) -> None:
        """End-to-end: register queues for sender and recipient, send
        messages, and verify event delivery (including local_message_id
        echo only on the sender's queue)."""
        user_profile = self.example_user("hamlet")
        email = user_profile.email
        recipient_user_profile = self.example_user("othello")
        recipient_email = recipient_user_profile.email
        self.login_user(user_profile)

        # Register a message-only event queue for the sender...
        result = self.tornado_call(
            get_events,
            user_profile,
            {
                "apply_markdown": orjson.dumps(True).decode(),
                "client_gravatar": orjson.dumps(True).decode(),
                "event_types": orjson.dumps(["message"]).decode(),
                "user_client": "website",
                "dont_block": orjson.dumps(True).decode(),
            },
        )
        self.assert_json_success(result)
        queue_id = orjson.loads(result.content)["queue_id"]

        # ...and one for the recipient.
        recipient_result = self.tornado_call(
            get_events,
            recipient_user_profile,
            {
                "apply_markdown": orjson.dumps(True).decode(),
                "client_gravatar": orjson.dumps(True).decode(),
                "event_types": orjson.dumps(["message"]).decode(),
                "user_client": "website",
                "dont_block": orjson.dumps(True).decode(),
            },
        )
        self.assert_json_success(recipient_result)
        recipient_queue_id = orjson.loads(recipient_result.content)["queue_id"]

        # A freshly allocated queue has no events.
        result = self.tornado_call(
            get_events,
            user_profile,
            {
                "queue_id": queue_id,
                "user_client": "website",
                "last_event_id": -1,
                "dont_block": orjson.dumps(True).decode(),
            },
        )
        events = orjson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 0)

        local_id = "10.01"
        with self.captureOnCommitCallbacks(execute=True):
            check_send_message(
                sender=user_profile,
                client=get_client("whatever"),
                recipient_type_name="private",
                message_to=[recipient_email],
                topic_name=None,
                message_content="hello",
                local_id=local_id,
                sender_queue_id=queue_id,
            )

        # The sender's queue sees the message, with the local id echoed back.
        result = self.tornado_call(
            get_events,
            user_profile,
            {
                "queue_id": queue_id,
                "user_client": "website",
                "last_event_id": -1,
                "dont_block": orjson.dumps(True).decode(),
            },
        )
        events = orjson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["sender_email"], email)
        self.assertEqual(events[0]["local_message_id"], local_id)
        self.assertEqual(events[0]["message"]["display_recipient"][0]["is_mirror_dummy"], False)
        self.assertEqual(events[0]["message"]["display_recipient"][1]["is_mirror_dummy"], False)

        last_event_id = events[0]["id"]
        local_id = "10.02"

        with self.captureOnCommitCallbacks(execute=True):
            check_send_message(
                sender=user_profile,
                client=get_client("whatever"),
                recipient_type_name="private",
                message_to=[recipient_email],
                topic_name=None,
                message_content="hello",
                local_id=local_id,
                sender_queue_id=queue_id,
            )

        # Polling past the first event id returns only the second message.
        result = self.tornado_call(
            get_events,
            user_profile,
            {
                "queue_id": queue_id,
                "user_client": "website",
                "last_event_id": last_event_id,
                "dont_block": orjson.dumps(True).decode(),
            },
        )
        events = orjson.loads(result.content)["events"]
        self.assert_json_success(result)
        self.assert_length(events, 1)
        self.assertEqual(events[0]["type"], "message")
        self.assertEqual(events[0]["message"]["sender_email"], email)
        self.assertEqual(events[0]["local_message_id"], local_id)

        # Test that the received message in the receiver's event queue
        # exists and does not contain a local id
        recipient_result = self.tornado_call(
            get_events,
            recipient_user_profile,
            {
                "queue_id": recipient_queue_id,
                "user_client": "website",
                "last_event_id": -1,
                "dont_block": orjson.dumps(True).decode(),
            },
        )
        recipient_events = orjson.loads(recipient_result.content)["events"]
        self.assert_json_success(recipient_result)
        self.assert_length(recipient_events, 2)
        self.assertEqual(recipient_events[0]["type"], "message")
        self.assertEqual(recipient_events[0]["message"]["sender_email"], email)
        self.assertTrue("local_message_id" not in recipient_events[0])
        self.assertEqual(recipient_events[1]["type"], "message")
        self.assertEqual(recipient_events[1]["message"]["sender_email"], email)
        self.assertTrue("local_message_id" not in recipient_events[1])

    def test_get_events_narrow(self) -> None:
        """A narrowed queue only delivers matching messages; also checks the
        apply_markdown and client_gravatar rendering options."""
        user_profile = self.example_user("hamlet")
        self.login_user(user_profile)

        def get_message(apply_markdown: bool, client_gravatar: bool) -> Dict[str, Any]:
            # Allocate a queue narrowed to stream "denmark", send one PM
            # (filtered out) and one stream message, return the latter.
            result = self.tornado_call(
                get_events,
                user_profile,
                dict(
                    apply_markdown=orjson.dumps(apply_markdown).decode(),
                    client_gravatar=orjson.dumps(client_gravatar).decode(),
                    event_types=orjson.dumps(["message"]).decode(),
                    narrow=orjson.dumps([["stream", "denmark"]]).decode(),
                    user_client="website",
                    dont_block=orjson.dumps(True).decode(),
                ),
            )
            self.assert_json_success(result)
            queue_id = orjson.loads(result.content)["queue_id"]

            result = self.tornado_call(
                get_events,
                user_profile,
                {
                    "queue_id": queue_id,
                    "user_client": "website",
                    "last_event_id": -1,
                    "dont_block": orjson.dumps(True).decode(),
                },
            )
            events = orjson.loads(result.content)["events"]
            self.assert_json_success(result)
            self.assert_length(events, 0)

            self.send_personal_message(user_profile, self.example_user("othello"), "hello")
            self.send_stream_message(user_profile, "Denmark", "**hello**")

            result = self.tornado_call(
                get_events,
                user_profile,
                {
                    "queue_id": queue_id,
                    "user_client": "website",
                    "last_event_id": -1,
                    "dont_block": orjson.dumps(True).decode(),
                },
            )
            events = orjson.loads(result.content)["events"]
            self.assert_json_success(result)
            # Only the stream message matches the narrow.
            self.assert_length(events, 1)
            self.assertEqual(events[0]["type"], "message")
            return events[0]["message"]

        message = get_message(apply_markdown=False, client_gravatar=False)
        self.assertEqual(message["display_recipient"], "Denmark")
        self.assertEqual(message["content"], "**hello**")
        self.assertEqual(urlsplit(message["avatar_url"]).hostname, "secure.gravatar.com")

        message = get_message(apply_markdown=True, client_gravatar=False)
        self.assertEqual(message["display_recipient"], "Denmark")
        self.assertEqual(message["content"], "<p><strong>hello</strong></p>")
        self.assertIn("gravatar.com", message["avatar_url"])

        do_change_user_setting(
            user_profile,
            "email_address_visibility",
            UserProfile.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
            acting_user=None,
        )

        # With client_gravatar, gravatar URLs are omitted (None).
        message = get_message(apply_markdown=False, client_gravatar=True)
        self.assertEqual(message["display_recipient"], "Denmark")
        self.assertEqual(message["content"], "**hello**")
        self.assertEqual(message["avatar_url"], None)

        message = get_message(apply_markdown=True, client_gravatar=True)
        self.assertEqual(message["display_recipient"], "Denmark")
        self.assertEqual(message["content"], "<p><strong>hello</strong></p>")
        self.assertEqual(message["avatar_url"], None)

    def test_bogus_queue_id(self) -> None:
        """An unknown queue_id raises BadEventQueueIdError."""
        user = self.example_user("hamlet")

        with self.assertRaises(BadEventQueueIdError):
            self.tornado_call(
                get_events,
                user,
                {
                    "queue_id": "hamster",
                    "user_client": "website",
                    "last_event_id": -1,
                    "dont_block": orjson.dumps(True).decode(),
                },
            )

    def test_wrong_user_queue_id(self) -> None:
        """Polling another user's queue is rejected and logged."""
        user = self.example_user("hamlet")
        wrong_user = self.example_user("othello")

        result = self.tornado_call(
            get_events,
            user,
            {
                "apply_markdown": orjson.dumps(True).decode(),
                "client_gravatar": orjson.dumps(True).decode(),
                "event_types": orjson.dumps(["message"]).decode(),
                "user_client": "website",
                "dont_block": orjson.dumps(True).decode(),
            },
        )
        self.assert_json_success(result)
        queue_id = orjson.loads(result.content)["queue_id"]

        with self.assertLogs(level="WARNING") as cm, self.assertRaises(BadEventQueueIdError):
            self.tornado_call(
                get_events,
                wrong_user,
                {
                    "queue_id": queue_id,
                    "user_client": "website",
                    "last_event_id": -1,
                    "dont_block": orjson.dumps(True).decode(),
                },
            )
        self.assertIn("not authorized for queue", cm.output[0])

    def test_get_events_custom_profile_fields(self) -> None:
        """The pronouns_field_type_supported client capability controls
        whether PRONOUNS custom fields are downgraded to SHORT_TEXT in
        custom_profile_fields events."""
        user_profile = self.example_user("iago")
        self.login_user(user_profile)
        profile_field = CustomProfileField.objects.get(realm=user_profile.realm, name="Pronouns")

        def check_pronouns_type_field_supported(
            pronouns_field_type_supported: bool, new_name: str
        ) -> None:
            # Allocate a queue with the given capability, trigger a field
            # rename, then verify the type reported in the resulting event.
            clear_client_event_queues_for_testing()

            queue_data = dict(
                apply_markdown=True,
                all_public_streams=True,
                client_type_name="ZulipMobile",
                event_types=["custom_profile_fields"],
                last_connection_time=time.time(),
                queue_timeout=0,
                realm_id=user_profile.realm.id,
                user_profile_id=user_profile.id,
                pronouns_field_type_supported=pronouns_field_type_supported,
            )
            client = allocate_client_descriptor(queue_data)

            try_update_realm_custom_profile_field(
                realm=user_profile.realm, field=profile_field, name=new_name
            )
            result = self.tornado_call(
                get_events,
                user_profile,
                {
                    "queue_id": client.event_queue.id,
                    "user_client": "ZulipAndroid",
                    "last_event_id": -1,
                    "dont_block": orjson.dumps(True).decode(),
                },
            )
            events = orjson.loads(result.content)["events"]
            self.assert_json_success(result)
            self.assert_length(events, 1)

            [pronouns_field] = (
                field for field in events[0]["fields"] if field["id"] == profile_field.id
            )
            if pronouns_field_type_supported:
                expected_type = CustomProfileField.PRONOUNS
            else:
                expected_type = CustomProfileField.SHORT_TEXT
            self.assertEqual(pronouns_field["type"], expected_type)

        check_pronouns_type_field_supported(False, "Pronouns field")
        check_pronouns_type_field_supported(True, "Pronouns")
class FetchInitialStateDataTest(ZulipTestCase):
# Non-admin users don't have access to all bots
def test_realm_bots_non_admin(self) -> None:
    """A non-admin's initial state has an empty realm_bots list, and no
    bot API key appears anywhere in the payload."""
    cordelia = self.example_user("cordelia")
    self.assertFalse(cordelia.is_realm_admin)
    state = fetch_initial_state_data(cordelia, realm=cordelia.realm)
    self.assert_length(state["realm_bots"], 0)
    # additionally the API key for a random bot is not present in the data
    bot_api_key = get_api_key(self.notification_bot(cordelia.realm))
    self.assertNotIn(bot_api_key, str(state))
# Admin users have access to all bots in the realm_bots field
def test_realm_bots_admin(self) -> None:
    """After promotion to realm administrator, realm_bots is populated."""
    hamlet = self.example_user("hamlet")
    do_change_user_role(hamlet, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
    self.assertTrue(hamlet.is_realm_admin)
    state = fetch_initial_state_data(hamlet, realm=hamlet.realm)
    self.assertGreater(len(state["realm_bots"]), 2)
def test_max_message_id_with_no_history(self) -> None:
    """With no UserMessage rows at all, max_message_id falls back to -1."""
    aaron = self.example_user("aaron")
    # Delete all historical messages for this user
    UserMessage.objects.filter(user_profile=aaron).delete()
    state = fetch_initial_state_data(aaron, realm=aaron.realm)
    self.assertEqual(state["max_message_id"], -1)
def test_delivery_email_presence_for_non_admins(self) -> None:
    """Non-admins see another user's delivery_email only when that user's
    email_address_visibility is EVERYONE; with ADMINS it is None.

    Fix: the original fetched hamlet twice via duplicate
    `self.example_user("hamlet")` assignments; the redundant one is removed.
    """
    user_profile = self.example_user("aaron")
    self.assertFalse(user_profile.is_realm_admin)

    hamlet = self.example_user("hamlet")
    do_change_user_setting(
        hamlet,
        "email_address_visibility",
        UserProfile.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
        acting_user=None,
    )
    result = fetch_initial_state_data(user_profile, realm=user_profile.realm)
    (hamlet_obj,) = (value for key, value in result["raw_users"].items() if key == hamlet.id)
    # Visible to everyone: the real delivery email is exposed.
    self.assertEqual(hamlet_obj["delivery_email"], hamlet.delivery_email)

    do_change_user_setting(
        hamlet,
        "email_address_visibility",
        UserProfile.EMAIL_ADDRESS_VISIBILITY_ADMINS,
        acting_user=None,
    )
    result = fetch_initial_state_data(user_profile, realm=user_profile.realm)
    (hamlet_obj,) = (value for key, value in result["raw_users"].items() if key == hamlet.id)
    # Admins-only visibility: non-admins get None.
    self.assertIsNone(hamlet_obj["delivery_email"])
def test_delivery_email_presence_for_admins(self) -> None:
    """Admins always receive a delivery_email field for other users,
    regardless of the target's email_address_visibility setting.

    Fix: the original fetched hamlet twice via duplicate
    `self.example_user("hamlet")` assignments; the redundant one is removed.
    """
    user_profile = self.example_user("iago")
    self.assertTrue(user_profile.is_realm_admin)

    hamlet = self.example_user("hamlet")
    do_change_user_setting(
        hamlet,
        "email_address_visibility",
        UserProfile.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
        acting_user=None,
    )
    result = fetch_initial_state_data(user_profile, realm=user_profile.realm)
    (hamlet_obj,) = (value for key, value in result["raw_users"].items() if key == hamlet.id)
    self.assertEqual(hamlet_obj["delivery_email"], hamlet.delivery_email)

    do_change_user_setting(
        hamlet,
        "email_address_visibility",
        UserProfile.EMAIL_ADDRESS_VISIBILITY_ADMINS,
        acting_user=None,
    )
    result = fetch_initial_state_data(user_profile, realm=user_profile.realm)
    (hamlet_obj,) = (value for key, value in result["raw_users"].items() if key == hamlet.id)
    # Even with admins-only visibility, admins still get the field.
    self.assertIn("delivery_email", hamlet_obj)
def test_user_avatar_url_field_optional(self) -> None:
    """With user_avatar_url_field_optional, long-term-idle users have the
    avatar_url key omitted entirely; adding client_gravatar additionally
    turns gravatar-backed URLs into None."""
    hamlet = self.example_user("hamlet")
    idle_users = [
        self.example_user("iago"),
        self.example_user("cordelia"),
        self.example_user("ZOE"),
        self.example_user("othello"),
    ]
    for idle_user in idle_users:
        idle_user.long_term_idle = True
        idle_user.save()
    idle_user_ids = [idle_user.id for idle_user in idle_users]

    result = fetch_initial_state_data(
        user_profile=hamlet,
        realm=hamlet.realm,
        user_avatar_url_field_optional=True,
    )
    raw_users = result["raw_users"]

    # Idle users: no avatar_url key at all; everyone else: a real URL.
    for user_dict in raw_users.values():
        if user_dict["user_id"] not in idle_user_ids:
            self.assertIsNotNone(user_dict["avatar_url"])
        else:
            self.assertFalse("avatar_url" in user_dict)

    # Remember who is served from gravatar for the second pass.
    gravatar_users_id = []
    for user_dict in raw_users.values():
        if "avatar_url" not in user_dict:
            continue
        if urlsplit(user_dict["avatar_url"]).hostname == "secure.gravatar.com":
            gravatar_users_id.append(user_dict["user_id"])

    reset_email_visibility_to_everyone_in_zulip_realm()

    # Test again with client_gravatar = True
    result = fetch_initial_state_data(
        user_profile=hamlet,
        realm=hamlet.realm,
        client_gravatar=True,
        user_avatar_url_field_optional=True,
    )
    raw_users = result["raw_users"]
    for user_dict in raw_users.values():
        if user_dict["user_id"] in gravatar_users_id:
            # Gravatar-backed avatars collapse to None for the client to compute.
            self.assertIsNone(user_dict["avatar_url"])
        else:
            self.assertFalse("avatar_url" in user_dict)
def test_user_settings_based_on_client_capabilities(self) -> None:
    """user_settings_object controls whether individual settings are
    duplicated at the top level of the register response."""
    hamlet = self.example_user("hamlet")

    # Modern client: settings live only under "user_settings".
    state = fetch_initial_state_data(
        user_profile=hamlet,
        realm=hamlet.realm,
        user_settings_object=True,
    )
    self.assertIn("user_settings", state)
    for prop in UserProfile.property_types:
        self.assertNotIn(prop, state)
        self.assertIn(prop, state["user_settings"])

    # Legacy client: legacy settings are mirrored at the top level too.
    state = fetch_initial_state_data(
        user_profile=hamlet,
        realm=hamlet.realm,
        user_settings_object=False,
    )
    self.assertIn("user_settings", state)
    legacy_settings = {
        **UserProfile.display_settings_legacy,
        **UserProfile.notification_settings_legacy,
    }
    for prop in UserProfile.property_types:
        if prop in legacy_settings:
            # Only legacy settings are included in the top level.
            self.assertIn(prop, state)
        self.assertIn(prop, state["user_settings"])
def test_realm_linkifiers_based_on_client_capabilities(self) -> None:
user = self.example_user("iago")
self.login_user(user)
data = {
"pattern": "#(?P<id>[123])",
"url_template": "https://realm.com/my_realm_filter/{id}",
}
post_result = self.client_post("/json/realm/filters", info=data)
self.assert_json_success(post_result)
result = fetch_initial_state_data(
user_profile=user,
realm=user.realm,
linkifier_url_template=True,
)
self.assertEqual(result["realm_filters"], [])
self.assertEqual(result["realm_linkifiers"][-1]["pattern"], "#(?P<id>[123])")
self.assertEqual(
result["realm_linkifiers"][-1]["url_template"],
            # Expanded via URL template (RFC 6570) syntax, not %-interpolation.
"https://realm.com/my_realm_filter/{id}",
)
# The default behavior should be `linkifier_url_template=False`
result = fetch_initial_state_data(
user_profile=user,
realm=user.realm,
            # linkifier_url_template intentionally omitted to exercise the default.
)
self.assertEqual(result["realm_filters"], [])
self.assertEqual(result["realm_linkifiers"], [])
def test_pronouns_field_type_support(self) -> None:
hamlet = self.example_user("hamlet")
result = fetch_initial_state_data(
user_profile=hamlet,
realm=hamlet.realm,
pronouns_field_type_supported=False,
)
self.assertIn("custom_profile_fields", result)
custom_profile_fields = result["custom_profile_fields"]
[pronouns_field] = (field for field in custom_profile_fields if field["name"] == "Pronouns")
self.assertEqual(pronouns_field["type"], CustomProfileField.SHORT_TEXT)
result = fetch_initial_state_data(
user_profile=hamlet,
realm=hamlet.realm,
pronouns_field_type_supported=True,
)
self.assertIn("custom_profile_fields", result)
custom_profile_fields = result["custom_profile_fields"]
[pronouns_field] = (field for field in custom_profile_fields if field["name"] == "Pronouns")
self.assertEqual(pronouns_field["type"], CustomProfileField.PRONOUNS)
class ClientDescriptorsTest(ZulipTestCase):
    """Tests for how Tornado fans message events out to client event queues,
    covering get_client_info_for_message_event (which queues should receive
    a message) and process_message_event (what payload each queue gets)."""

    def test_get_client_info_for_all_public_streams(self) -> None:
        """A queue registered with all_public_streams=True receives stream
        messages even when its user is not listed in `users`, and is
        flagged as the sender when sender_queue_id matches."""
        hamlet = self.example_user("hamlet")
        realm = hamlet.realm
        queue_data = dict(
            all_public_streams=True,
            apply_markdown=True,
            client_gravatar=True,
            client_type_name="website",
            event_types=["message"],
            last_connection_time=time.time(),
            queue_timeout=0,
            realm_id=realm.id,
            user_profile_id=hamlet.id,
        )
        client = allocate_client_descriptor(queue_data)
        message_event = dict(
            realm_id=realm.id,
            stream_name="whatever",
        )
        # Empty `users`: the queue is included purely via all_public_streams.
        client_info = get_client_info_for_message_event(
            message_event,
            users=[],
        )
        self.assert_length(client_info, 1)
        dct = client_info[client.event_queue.id]
        self.assertEqual(dct["client"].apply_markdown, True)
        self.assertEqual(dct["client"].client_gravatar, True)
        self.assertEqual(dct["client"].user_profile_id, hamlet.id)
        self.assertEqual(dct["flags"], [])
        self.assertEqual(dct["is_sender"], False)
        # With sender_queue_id set to this queue, is_sender flips to True.
        message_event = dict(
            realm_id=realm.id,
            stream_name="whatever",
            sender_queue_id=client.event_queue.id,
        )
        client_info = get_client_info_for_message_event(
            message_event,
            users=[],
        )
        dct = client_info[client.event_queue.id]
        self.assertEqual(dct["is_sender"], True)

    def test_get_client_info_for_normal_users(self) -> None:
        """A queue with all_public_streams=False is selected only when its
        user id appears in `users`, and per-user flags are carried through;
        checked across all apply_markdown/client_gravatar combinations."""
        hamlet = self.example_user("hamlet")
        cordelia = self.example_user("cordelia")
        realm = hamlet.realm

        def test_get_info(apply_markdown: bool, client_gravatar: bool) -> None:
            # Reset queues so each parameter combination starts clean.
            clear_client_event_queues_for_testing()
            queue_data = dict(
                all_public_streams=False,
                apply_markdown=apply_markdown,
                client_gravatar=client_gravatar,
                client_type_name="website",
                event_types=["message"],
                last_connection_time=time.time(),
                queue_timeout=0,
                realm_id=realm.id,
                user_profile_id=hamlet.id,
            )
            client = allocate_client_descriptor(queue_data)
            message_event = dict(
                realm_id=realm.id,
                stream_name="whatever",
            )
            # Only cordelia listed -> hamlet's queue is not selected.
            client_info = get_client_info_for_message_event(
                message_event,
                users=[
                    dict(id=cordelia.id),
                ],
            )
            self.assert_length(client_info, 0)
            # hamlet listed with flags -> his queue is selected, flags kept.
            client_info = get_client_info_for_message_event(
                message_event,
                users=[
                    dict(id=cordelia.id),
                    dict(id=hamlet.id, flags=["mentioned"]),
                ],
            )
            self.assert_length(client_info, 1)
            dct = client_info[client.event_queue.id]
            self.assertEqual(dct["client"].apply_markdown, apply_markdown)
            self.assertEqual(dct["client"].client_gravatar, client_gravatar)
            self.assertEqual(dct["client"].user_profile_id, hamlet.id)
            self.assertEqual(dct["flags"], ["mentioned"])
            self.assertEqual(dct["is_sender"], False)

        test_get_info(apply_markdown=False, client_gravatar=False)
        test_get_info(apply_markdown=True, client_gravatar=False)
        test_get_info(apply_markdown=False, client_gravatar=True)
        test_get_info(apply_markdown=True, client_gravatar=True)

    def test_process_message_event_with_mocked_client_info(self) -> None:
        """process_message_event should tailor each queue's payload to its
        apply_markdown (rendered HTML vs. raw Markdown) and client_gravatar
        (avatar_url computed server-side vs. sent as None) settings."""
        hamlet = self.example_user("hamlet")

        class MockClient:
            # Minimal stand-in for a ClientDescriptor: records every event
            # delivered to it in self.events for later inspection.
            def __init__(
                self, user_profile_id: int, apply_markdown: bool, client_gravatar: bool
            ) -> None:
                self.user_profile_id = user_profile_id
                self.apply_markdown = apply_markdown
                self.client_gravatar = client_gravatar
                self.client_type_name = "whatever"
                self.events: List[Dict[str, Any]] = []

            def accepts_messages(self) -> bool:
                return True

            def accepts_event(self, event: Dict[str, Any]) -> bool:
                assert event["type"] == "message"
                return True

            def add_event(self, event: Dict[str, Any]) -> None:
                self.events.append(event)

        # One client per apply_markdown/client_gravatar combination.
        client1 = MockClient(
            user_profile_id=hamlet.id,
            apply_markdown=True,
            client_gravatar=False,
        )
        client2 = MockClient(
            user_profile_id=hamlet.id,
            apply_markdown=False,
            client_gravatar=False,
        )
        client3 = MockClient(
            user_profile_id=hamlet.id,
            apply_markdown=True,
            client_gravatar=True,
        )
        client4 = MockClient(
            user_profile_id=hamlet.id,
            apply_markdown=False,
            client_gravatar=True,
        )
        client_info = {
            "client:1": dict(
                client=client1,
                flags=["starred"],
            ),
            "client:2": dict(
                client=client2,
                flags=["has_alert_word"],
            ),
            "client:3": dict(
                client=client3,
                flags=[],
            ),
            "client:4": dict(
                client=client4,
                flags=[],
            ),
        }
        sender = hamlet
        message_event = dict(
            message_dict=dict(
                id=999,
                content="**hello**",
                rendered_content="<b>hello</b>",
                sender_id=sender.id,
                type="stream",
                client="website",
                # NOTE: Some of these fields are clutter, but some
                # will be useful when we let clients specify
                # that they can compute their own gravatar URLs.
                sender_email=sender.email,
                sender_delivery_email=sender.delivery_email,
                sender_realm_id=sender.realm_id,
                sender_avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
                sender_avatar_version=1,
                sender_is_mirror_dummy=None,
                sender_email_address_visibility=UserProfile.EMAIL_ADDRESS_VISIBILITY_EVERYONE,
                recipient_type=None,
                recipient_type_id=None,
            ),
        )
        # Setting users to `[]` bypasses code we don't care about
        # for this test--we assume client_info is correct in our mocks,
        # and we are interested in how messages are put on event queue.
        users: List[Dict[str, Any]] = []
        with mock.patch(
            "zerver.tornado.event_queue.get_client_info_for_message_event", return_value=client_info
        ):
            process_message_event(message_event, users)
        # We are not closely examining avatar_url at this point, so
        # just sanity check them and then delete the keys so that
        # upcoming comparisons work.
        for client in [client1, client2]:
            message = client.events[0]["message"]
            self.assertIn("gravatar.com", message["avatar_url"])
            message.pop("avatar_url")
        # apply_markdown=True -> rendered HTML content.
        self.assertEqual(
            client1.events,
            [
                dict(
                    type="message",
                    message=dict(
                        type="stream",
                        sender_id=sender.id,
                        sender_email=sender.email,
                        id=999,
                        content="<b>hello</b>",
                        content_type="text/html",
                        client="website",
                    ),
                    flags=["starred"],
                ),
            ],
        )
        # apply_markdown=False -> raw Markdown content.
        self.assertEqual(
            client2.events,
            [
                dict(
                    type="message",
                    message=dict(
                        type="stream",
                        sender_id=sender.id,
                        sender_email=sender.email,
                        id=999,
                        content="**hello**",
                        content_type="text/html",
                        client="website",
                    ),
                    flags=["has_alert_word"],
                ),
            ],
        )
        # client_gravatar=True -> avatar_url is None (client computes it).
        self.assertEqual(
            client3.events,
            [
                dict(
                    type="message",
                    message=dict(
                        type="stream",
                        sender_id=sender.id,
                        sender_email=sender.email,
                        avatar_url=None,
                        id=999,
                        content="<b>hello</b>",
                        content_type="text/html",
                        client="website",
                    ),
                    flags=[],
                ),
            ],
        )
        self.assertEqual(
            client4.events,
            [
                dict(
                    type="message",
                    message=dict(
                        type="stream",
                        sender_id=sender.id,
                        sender_email=sender.email,
                        avatar_url=None,
                        id=999,
                        content="**hello**",
                        content_type="text/x-markdown",
                        client="website",
                    ),
                    flags=[],
                ),
            ],
        )
class ReloadWebClientsTest(ZulipTestCase):
    """Tests for the `web_reload_client` event mechanism that asks web
    clients to reload themselves."""

    # Note: this block previously contained stray git-blame timestamp lines
    # pasted into the source; they have been removed to restore valid syntax.
    def test_web_reload_clients(self) -> None:
        """A reload event is delivered only to queues that have been
        explicitly marked via mark_clients_to_reload."""
        hamlet = self.example_user("hamlet")
        realm = hamlet.realm
        clear_client_event_queues_for_testing()
        queue_data = dict(
            all_public_streams=False,
            apply_markdown=True,
            client_gravatar=True,
            client_type_name="website",
            event_types=None,
            last_connection_time=time.time(),
            queue_timeout=0,
            realm_id=realm.id,
            user_profile_id=hamlet.id,
        )
        client = allocate_client_descriptor(queue_data)
        # Without marking the queue, no reload event should be enqueued.
        send_web_reload_client_events()
        self.assert_length(client.event_queue.queue, 0)
        # After marking, exactly one well-formed reload event arrives.
        mark_clients_to_reload([client.event_queue.id])
        send_web_reload_client_events()
        self.assert_length(client.event_queue.queue, 1)
        reload_event = client.event_queue.queue[0]
        check_web_reload_client_event("web_reload_client_event", reload_event)
        self.assertEqual(
            reload_event,
            dict(
                type="web_reload_client",
                immediate=False,
                id=0,
            ),
        )
class FetchQueriesTest(ZulipTestCase):
    """Tracks the number of database queries issued by
    fetch_initial_state_data, both in total and per event type, so that
    query-count regressions are caught by CI."""

    # Note: the expected_counts literal previously contained a stray
    # git-blame commit message pasted into the source; it has been removed
    # to restore valid syntax.
    def test_queries(self) -> None:
        user = self.example_user("hamlet")
        self.login_user(user)
        # Fetch realm like it is done when calling fetch_initial_state_data
        # in production to match the query counts with the actual query
        # count in production.
        realm = get_realm_with_settings(realm_id=user.realm_id)
        with self.assert_database_query_count(44):
            with mock.patch("zerver.lib.events.always_want") as want_mock:
                fetch_initial_state_data(user, realm=realm)
        # Expected query count for each event type when fetched alone.
        expected_counts = dict(
            alert_words=1,
            custom_profile_fields=1,
            default_streams=1,
            default_stream_groups=1,
            drafts=1,
            message=1,
            muted_topics=1,
            muted_users=1,
            onboarding_steps=1,
            presence=1,
            realm=1,
            realm_bot=1,
            realm_domains=1,
            realm_embedded_bots=0,
            realm_incoming_webhook_bots=0,
            realm_emoji=1,
            realm_filters=0,
            realm_linkifiers=0,
            realm_playgrounds=1,
            realm_user=4,
            realm_user_groups=5,
            realm_user_settings_defaults=1,
            recent_private_conversations=1,
            scheduled_messages=1,
            starred_messages=1,
            stream=3,
            stop_words=0,
            subscription=4,
            update_display_settings=0,
            update_global_notifications=0,
            update_message_flags=5,
            user_settings=0,
            user_status=1,
            user_topic=1,
            video_calls=0,
            giphy=0,
        )
        # Every event type that fetch_initial_state_data asked about must
        # have an expected count, and vice versa.
        wanted_event_types = {item[0][0] for item in want_mock.call_args_list}
        self.assertEqual(wanted_event_types, set(expected_counts))
        for event_type in sorted(wanted_event_types):
            count = expected_counts[event_type]
            with self.assert_database_query_count(count):
                if event_type == "update_message_flags":
                    # update_message_flags data is computed alongside message.
                    event_types = ["update_message_flags", "message"]
                else:
                    event_types = [event_type]
                fetch_initial_state_data(user, realm=realm, event_types=event_types)
class TestEventsRegisterAllPublicStreamsDefaults(ZulipTestCase):
    """Exercise _default_all_public_streams: an explicitly passed value
    always wins; when the client passes None, the user's saved
    default_all_public_streams setting is used."""

    @override
    def setUp(self) -> None:
        super().setUp()
        self.user_profile = self.example_user("hamlet")
        self.email = self.user_profile.email

    def _save_default(self, default: bool) -> None:
        # Persist the user's default_all_public_streams preference.
        self.user_profile.default_all_public_streams = default
        self.user_profile.save()

    def test_use_passed_all_public_true_default_false(self) -> None:
        self._save_default(False)
        self.assertTrue(_default_all_public_streams(self.user_profile, True))

    def test_use_passed_all_public_true_default(self) -> None:
        self._save_default(True)
        self.assertTrue(_default_all_public_streams(self.user_profile, True))

    def test_use_passed_all_public_false_default_false(self) -> None:
        self._save_default(False)
        self.assertFalse(_default_all_public_streams(self.user_profile, False))

    def test_use_passed_all_public_false_default_true(self) -> None:
        self._save_default(True)
        self.assertFalse(_default_all_public_streams(self.user_profile, False))

    def test_use_true_default_for_none(self) -> None:
        self._save_default(True)
        self.assertTrue(_default_all_public_streams(self.user_profile, None))

    def test_use_false_default_for_none(self) -> None:
        self._save_default(False)
        self.assertFalse(_default_all_public_streams(self.user_profile, None))
class TestEventsRegisterNarrowDefaults(ZulipTestCase):
    """Exercise _default_narrow: a non-empty narrow passed by the client is
    used as-is; an empty narrow falls back to the user's default events
    register stream, when one is configured."""

    @override
    def setUp(self) -> None:
        super().setUp()
        self.user_profile = self.example_user("hamlet")
        self.email = self.user_profile.email
        self.stream = get_stream("Verona", self.user_profile.realm)

    def _set_default_stream_id(self, stream_id: Any) -> None:
        # Persist the user's default events-register stream (or clear it).
        self.user_profile.default_events_register_stream_id = stream_id
        self.user_profile.save()

    def test_use_passed_narrow_no_default(self) -> None:
        self._set_default_stream_id(None)
        result = _default_narrow(self.user_profile, [["stream", "my_stream"]])
        self.assertEqual(result, [["stream", "my_stream"]])

    def test_use_passed_narrow_with_default(self) -> None:
        self._set_default_stream_id(self.stream.id)
        result = _default_narrow(self.user_profile, [["stream", "my_stream"]])
        self.assertEqual(result, [["stream", "my_stream"]])

    def test_use_default_if_narrow_is_empty(self) -> None:
        self._set_default_stream_id(self.stream.id)
        self.assertEqual(_default_narrow(self.user_profile, []), [["stream", "Verona"]])

    def test_use_narrow_if_default_is_none(self) -> None:
        self._set_default_stream_id(None)
        self.assertEqual(_default_narrow(self.user_profile, []), [])
class TestGetUserAPIDataSystemBotRealm(ZulipTestCase):
    def test_get_users_api_data_on_system_bot_realm(self) -> None:
        """Every cross-realm system bot should appear in the system bot
        realm's user data, flagged with is_system_bot."""
        system_realm = get_realm(settings.SYSTEM_BOT_REALM)
        user_dicts = get_users_for_api(
            system_realm,
            self.example_user("hamlet"),
            client_gravatar=True,
            user_avatar_url_field_optional=True,
        )
        for bot_email in settings.CROSS_REALM_BOT_EMAILS:
            bot = get_system_bot(bot_email, system_realm.id)
            self.assertTrue(bot.id in user_dicts)
            self.assertTrue(user_dicts[bot.id]["is_system_bot"])
class TestUserPresenceUpdatesDisabled(ZulipTestCase):
# For this test, we verify do_update_user_presence doesn't send
# events for organizations with more than
# USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS users, unless
# force_send_update is passed.
@override_settings(USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS=3)
def test_presence_events_disabled_on_larger_realm(self) -> None:
with self.capture_send_event_calls(expected_num_events=1):
do_update_user_presence(
self.example_user("cordelia"),
get_client("website"),
timezone_now(),
            # Legacy "active" status constant from the presence data model.
UserPresence.LEGACY_STATUS_ACTIVE_INT,
force_send_update=True,
)
with self.capture_send_event_calls(expected_num_events=0):
do_update_user_presence(
self.example_user("hamlet"),
get_client("website"),
timezone_now(),
            # Same status constant; this time without forcing an update event.
UserPresence.LEGACY_STATUS_ACTIVE_INT,
force_send_update=False,
)