2017-11-16 00:53:11 +01:00
|
|
|
import time
|
2022-07-14 21:11:26 +02:00
|
|
|
from typing import Any, Callable, Mapping, Optional, Sequence, TypeVar
|
2013-01-08 17:44:22 +01:00
|
|
|
|
2022-03-18 08:34:10 +01:00
|
|
|
from asgiref.sync import async_to_sync
|
2022-09-22 22:09:34 +02:00
|
|
|
from django.conf import settings
|
2017-11-16 00:53:11 +01:00
|
|
|
from django.http import HttpRequest, HttpResponse
|
2021-04-16 00:57:30 +02:00
|
|
|
from django.utils.translation import gettext as _
|
2024-02-08 20:57:16 +01:00
|
|
|
from pydantic import Json
|
2022-06-24 09:58:29 +02:00
|
|
|
from typing_extensions import ParamSpec
|
2013-01-23 22:25:22 +01:00
|
|
|
|
2024-02-08 20:40:39 +01:00
|
|
|
from zerver.decorator import internal_api_view, process_client
|
2021-06-30 18:35:50 +02:00
|
|
|
from zerver.lib.exceptions import JsonableError
|
2022-09-22 22:09:34 +02:00
|
|
|
from zerver.lib.queue import get_queue_client
|
2021-08-21 19:24:20 +02:00
|
|
|
from zerver.lib.request import REQ, RequestNotes, has_request_variables
|
2022-05-27 01:06:39 +02:00
|
|
|
from zerver.lib.response import AsynchronousResponse, json_success
|
2024-02-08 20:57:16 +01:00
|
|
|
from zerver.lib.typed_endpoint import typed_endpoint
|
2020-06-11 00:54:34 +02:00
|
|
|
from zerver.lib.validator import (
|
|
|
|
check_bool,
|
2022-07-14 21:11:26 +02:00
|
|
|
check_dict,
|
2020-06-11 00:54:34 +02:00
|
|
|
check_int,
|
|
|
|
check_list,
|
|
|
|
check_string,
|
|
|
|
to_non_negative_int,
|
|
|
|
)
|
2023-12-15 04:33:19 +01:00
|
|
|
from zerver.models import Client, UserProfile
|
|
|
|
from zerver.models.clients import get_client
|
2023-12-15 01:16:00 +01:00
|
|
|
from zerver.models.users import get_user_profile_by_id
|
2022-09-22 22:09:34 +02:00
|
|
|
from zerver.tornado.descriptors import is_current_port
|
2024-02-08 20:57:16 +01:00
|
|
|
from zerver.tornado.event_queue import (
|
|
|
|
access_client_descriptor,
|
|
|
|
fetch_events,
|
|
|
|
process_notification,
|
|
|
|
send_web_reload_client_events,
|
|
|
|
)
|
2022-09-22 22:09:34 +02:00
|
|
|
from zerver.tornado.sharding import get_user_tornado_port, notify_tornado_queue_name
|
2020-06-11 00:54:34 +02:00
|
|
|
|
2022-06-24 09:58:29 +02:00
|
|
|
# Typing helpers: P/T let in_tornado_thread declare that the returned
# callable has exactly the same signature as the callable it wraps.
P = ParamSpec("P")
T = TypeVar("T")
|
|
|
|
|
|
|
|
|
2022-06-24 09:58:29 +02:00
|
|
|
def in_tornado_thread(f: Callable[P, T]) -> Callable[P, T]:
    """Return a callable that runs ``f`` inside the Tornado event loop's
    thread, blocking the caller until it finishes and returning its result.

    The wrapper preserves ``f``'s signature via ParamSpec.
    """

    async def inner(*args: P.args, **kwargs: P.kwargs) -> T:
        return f(*args, **kwargs)

    return async_to_sync(inner)
|
2022-03-18 08:34:10 +01:00
|
|
|
|
2013-01-08 17:44:22 +01:00
|
|
|
|
2024-02-08 20:40:39 +01:00
|
|
|
@internal_api_view(True)
@has_request_variables
def notify(
    request: HttpRequest, data: Mapping[str, Any] = REQ(json_validator=check_dict([]))
) -> HttpResponse:
    """Inject an event into the Tornado event system, as if it had been
    read from RabbitMQ.  Used only by the puppeteer full-stack tests.
    """
    deliver = in_tornado_thread(process_notification)
    deliver(data)
    return json_success(request)
|
2013-01-08 17:44:22 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2024-02-08 20:57:16 +01:00
|
|
|
@internal_api_view(True)
@typed_endpoint
def web_reload_clients(
    request: HttpRequest,
    *,
    client_count: Optional[Json[int]] = None,
    immediate: Json[bool] = False,
) -> HttpResponse:
    """Ask (up to ``client_count``) web clients to reload themselves by
    sending them web_reload_client events via the Tornado thread.

    Reports how many events were sent and whether the pass covered
    every targeted client.
    """
    sent_events = in_tornado_thread(send_web_reload_client_events)(
        immediate=immediate, count=client_count
    )
    # With no cap, a single pass reaches everyone; with a cap, hitting
    # the cap exactly suggests clients may remain to be reloaded.
    complete = client_count is None or client_count != sent_events
    return json_success(request, {"sent_events": sent_events, "complete": complete})
|
|
|
|
|
|
|
|
|
2013-11-19 23:11:30 +01:00
|
|
|
@has_request_variables
def cleanup_event_queue(
    request: HttpRequest, user_profile: UserProfile, queue_id: str = REQ()
) -> HttpResponse:
    """Delete the given event queue belonging to the acting user.

    If this Tornado process does not host the user's queues, the
    deletion is forwarded to the correct shard via RabbitMQ.
    """
    notes = RequestNotes.get_notes(request)
    assert notes.log_data is not None
    notes.log_data["extra"] = f"[{queue_id}]"

    tornado_port = get_user_tornado_port(user_profile)
    if is_current_port(tornado_port):
        # This shard hosts the queue; clean it up directly in the
        # Tornado thread.
        descriptor = access_client_descriptor(user_profile.id, queue_id)
        in_tornado_thread(descriptor.cleanup)()
        return json_success(request)

    # X-Accel-Redirect is not supported for HTTP DELETE requests,
    # so we notify the shard hosting the acting user's queues via
    # enqueuing a special event.
    #
    # TODO: Because we return a 200 before confirming that the
    # event queue had been actually deleted by the process hosting
    # the queue, there's a race where a `GET /events` request can
    # succeed after getting a 200 from this endpoint.
    assert settings.USING_RABBITMQ
    get_queue_client().json_publish(
        notify_tornado_queue_name(tornado_port),
        {"users": [user_profile.id], "event": {"type": "cleanup_queue", "queue_id": queue_id}},
    )
    return json_success(request)
|
2013-11-19 23:11:30 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2024-02-08 20:40:39 +01:00
|
|
|
@internal_api_view(True)
@has_request_variables
def get_events_internal(
    request: HttpRequest, user_profile_id: int = REQ(json_validator=check_int)
) -> HttpResponse:
    """Internal Django=>Tornado entry point for `GET /events`.

    Looks up the user by ID, records logging metadata, and delegates
    to get_events_backend under the "internal" client name.
    """
    profile = get_user_profile_by_id(user_profile_id)
    notes = RequestNotes.get_notes(request)
    notes.requester_for_logs = profile.format_requester_for_logs()
    # Internal requests must already be routed to the user's shard.
    assert is_current_port(get_user_tornado_port(profile))

    process_client(request, profile, client_name="internal")
    return get_events_backend(request, profile)
|
2018-07-13 12:58:16 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
tornado: Rewrite Django integration to duplicate less code.
Since essentially the first use of Tornado in Zulip, we've been
maintaining our Tornado+Django system, AsyncDjangoHandler, with
several hundred lines of Django code copied into it.
The goal for that code was simple: We wanted a way to use our Django
middleware (for code sharing reasons) inside a Tornado process (since
we wanted to use Tornado for our async events system).
As part of the Django 2.2.x upgrade, I looked at upgrading this
implementation to be based off modern Django, and it's definitely
possible to do that:
* Continue forking load_middleware to save response middleware.
* Continue manually running the Django response middleware.
* Continue working out a hack involving copying all of _get_response
to change a couple lines allowing us our Tornado code to not
actually return the Django HttpResponse so we can long-poll. The
previous hack of returning None stopped being viable with the Django 2.2
MiddlewareMixin.__call__ implementation.
But I decided to take this opportunity to look at trying to avoid
copying material Django code, and there is a way to do it:
* Replace RespondAsynchronously with a response.asynchronous attribute
on the HttpResponse; this allows Django to run its normal plumbing
happily in a way that should be stable over time, and then we
proceed to discard the response inside the Tornado `get()` method to
implement long-polling. (Better yet might be raising an
exception?). This lets us eliminate maintaining a patched copy of
_get_response.
* Removing the @asynchronous decorator, which didn't add anything now
that we only have one API endpoint backend (with two frontend call
points) that could call into this. Combined with the last bullet,
this lets us remove a significant hack from our
never_cache_responses function.
* Calling the normal Django `get_response` method from zulip_finish
after creating a duplicate request to process, rather than writing
totally custom code to do that. This lets us eliminate maintaining
a patched copy of Django's load_middleware.
* Adding detailed comments explaining how this is supposed to work,
what problems we encounter, and how we solve various problems, which
is critical to being able to modify this code in the future.
A key advantage of these changes is that the exact same code should
work on Django 1.11, Django 2.2, and Django 3.x, because we're no
longer copying large blocks of core Django code and thus should be
much less vulnerable to refactors.
There may be a modest performance downside, in that we now run both
request and response middleware twice when longpolling (once for the
request we discard). We may be able to avoid the expensive part of
it, Zulip's own request/response middleware, with a bit of additional
custom code to save work for requests where we're planning to discard
the response. Profiling will be important to understanding what's
worth doing here.
2020-02-06 22:09:10 +01:00
|
|
|
def get_events(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    """Public `GET /events` entry point, with Tornado shard routing."""
    target_port = get_user_tornado_port(user_profile)
    if is_current_port(target_port):
        return get_events_backend(request, user_profile)

    # When a single realm is split across multiple Tornado shards,
    # any `GET /events` requests that are routed to the wrong
    # shard are redirected to the shard hosting the relevant
    # user's queues.  We use X-Accel-Redirect for this purpose,
    # which is efficient and keeps this redirect invisible to
    # clients.
    redirect_target = f"/internal/tornado/{target_port}{request.get_full_path()}"
    return HttpResponse("", headers={"X-Accel-Redirect": redirect_target})
|
2018-07-13 13:10:12 +02:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
2013-03-26 18:06:00 +01:00
|
|
|
@has_request_variables
def get_events_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    # user_client is intended only for internal Django=>Tornado requests
    # and thus shouldn't be documented for external use.
    user_client: Optional[Client] = REQ(
        converter=lambda var_name, s: get_client(s), default=None, intentionally_undocumented=True
    ),
    last_event_id: Optional[int] = REQ(json_validator=check_int, default=None),
    queue_id: Optional[str] = REQ(default=None),
    # apply_markdown, client_gravatar, all_public_streams, and various
    # other parameters are only used when registering a new queue via this
    # endpoint. This is a feature used primarily by get_events_internal
    # and not expected to be used by third-party clients.
    apply_markdown: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    client_gravatar: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    slim_presence: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    all_public_streams: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    event_types: Optional[Sequence[str]] = REQ(
        default=None, json_validator=check_list(check_string), intentionally_undocumented=True
    ),
    dont_block: bool = REQ(default=False, json_validator=check_bool),
    narrow: Sequence[Sequence[str]] = REQ(
        default=[],
        json_validator=check_list(check_list(check_string)),
        intentionally_undocumented=True,
    ),
    lifespan_secs: int = REQ(
        default=0, converter=to_non_negative_int, intentionally_undocumented=True
    ),
    bulk_message_deletion: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    stream_typing_notifications: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    user_settings_object: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    pronouns_field_type_supported: bool = REQ(
        default=True, json_validator=check_bool, intentionally_undocumented=True
    ),
    linkifier_url_template: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
    user_list_incomplete: bool = REQ(
        default=False, json_validator=check_bool, intentionally_undocumented=True
    ),
) -> HttpResponse:
    """Core long-polling handler for `GET /events`.

    When queue_id is None, builds new_queue_data from the request
    parameters so fetch_events can register a fresh event queue;
    otherwise polls the existing queue identified by queue_id starting
    after last_event_id.  Depending on fetch_events's result, this
    either returns events immediately, long-polls by returning an
    AsynchronousResponse, or re-raises an error from the event system.

    Raises JsonableError if all_public_streams is requested by a user
    who cannot access public streams.
    """
    if all_public_streams and not user_profile.can_access_public_streams():
        raise JsonableError(_("User not authorized for this query"))

    # Extract the Tornado handler from the request
    handler_id = RequestNotes.get_notes(request).tornado_handler_id
    assert handler_id is not None

    # Internal requests may spoof the acting client via user_client;
    # otherwise use the client recorded on the request notes.
    if user_client is None:
        valid_user_client = RequestNotes.get_notes(request).client
        assert valid_user_client is not None
    else:
        valid_user_client = user_client

    # No queue_id means the caller is registering a new event queue;
    # collect its configuration for fetch_events.
    new_queue_data = None
    if queue_id is None:
        new_queue_data = dict(
            user_profile_id=user_profile.id,
            realm_id=user_profile.realm_id,
            event_types=event_types,
            client_type_name=valid_user_client.name,
            apply_markdown=apply_markdown,
            client_gravatar=client_gravatar,
            slim_presence=slim_presence,
            all_public_streams=all_public_streams,
            queue_timeout=lifespan_secs,
            last_connection_time=time.time(),
            narrow=narrow,
            bulk_message_deletion=bulk_message_deletion,
            stream_typing_notifications=stream_typing_notifications,
            user_settings_object=user_settings_object,
            pronouns_field_type_supported=pronouns_field_type_supported,
            linkifier_url_template=linkifier_url_template,
            user_list_incomplete=user_list_incomplete,
        )

    # fetch_events must run in the Tornado thread, since it touches the
    # event queue data structures owned by that thread.
    result = in_tornado_thread(fetch_events)(
        user_profile_id=user_profile.id,
        queue_id=queue_id,
        last_event_id=last_event_id,
        client_type_name=valid_user_client.name,
        dont_block=dont_block,
        handler_id=handler_id,
        new_queue_data=new_queue_data,
    )
    if "extra_log_data" in result:
        log_data = RequestNotes.get_notes(request).log_data
        assert log_data is not None
        log_data["extra"] = result["extra_log_data"]

    if result["type"] == "async":
        # Return an AsynchronousResponse; this will result in
        # Tornado discarding the response and instead long-polling the
        # request.  See zulip_finish for more design details.
        return AsynchronousResponse()
    if result["type"] == "error":
        raise result["exception"]
    return json_success(request, data=result["response"])
|