zulip/zerver/lib/rest.py

from functools import wraps
from typing import Any, Callable, Dict, Mapping, Set, Tuple, Union, cast
from django.http import HttpRequest, HttpResponse
from django.urls import path
from django.urls.resolvers import URLPattern
from django.utils.cache import add_never_cache_headers
from django.views.decorators.csrf import csrf_exempt, csrf_protect

from zerver.decorator import (
    authenticated_json_view,
    authenticated_rest_api_view,
    authenticated_uploads_api_view,
    process_as_post,
)
from zerver.lib.exceptions import MissingAuthenticationError
from zerver.lib.request import get_request_notes
from zerver.lib.response import json_method_not_allowed
from zerver.lib.types import ViewFuncT

METHODS = ("GET", "HEAD", "POST", "PUT", "DELETE", "PATCH")
FLAGS = "override_api_url_scheme"


def default_never_cache_responses(view_func: ViewFuncT) -> ViewFuncT:
    """Patched version of the standard Django never_cache
    decorator that adds headers to a response so that it will never be
    cached, unless the view code has already set a Cache-Control
    header.
    """

    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        response = view_func(request, *args, **kwargs)
        if response.has_header("Cache-Control"):
            return response

        add_never_cache_headers(response)
        return response

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
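
# A minimal sketch of the decorator's behavior (the view name below is
# hypothetical and not part of this module): a view that sets its own
# Cache-Control header is returned untouched, while any other response gets
# never-cache headers added via Django's add_never_cache_headers.
#
#     @default_never_cache_responses
#     def serve_cached_asset(request: HttpRequest) -> HttpResponse:
#         response = HttpResponse(b"...")
#         response["Cache-Control"] = "public, max-age=3600"  # preserved as-is
#         return response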


@default_never_cache_responses
@csrf_exempt
def rest_dispatch(request: HttpRequest, **kwargs: Any) -> HttpResponse:
    """Dispatch to a REST API endpoint.

    Unauthenticated endpoints should not use this, as authentication is verified
    in the following ways:
        * for paths beginning with /api, HTTP basic auth
        * for paths beginning with /json (used by the web client), the session token

    This calls the function named in kwargs[request.method], if that request
    method is supported, and after wrapping that function to:

        * protect against CSRF (if the user is already authenticated through
          a Django session)
        * authenticate via an API key (otherwise)
        * coerce PUT/PATCH/DELETE into having POST-like semantics for
          retrieving variables

    Any keyword args that are *not* HTTP methods are passed through to the
    target function.

    Never make a urls.py pattern put user input into a variable called GET, POST,
    etc, as that is where we route HTTP verbs to target functions.
    """
    supported_methods: Dict[str, Any] = {}

    request_notes = get_request_notes(request)
    if request_notes.saved_response is not None:
        # For completing long-polled Tornado requests, we skip the
        # view function logic and just return the response.
        return request_notes.saved_response

    # duplicate kwargs so we can mutate the original as we go
    for arg in list(kwargs):
        if arg in METHODS:
            supported_methods[arg] = kwargs[arg]
            del kwargs[arg]

    if "GET" in supported_methods:
        supported_methods.setdefault("HEAD", supported_methods["GET"])

    if request.method == "OPTIONS":
        response = HttpResponse(status=204)  # No content
        response["Allow"] = ", ".join(sorted(supported_methods.keys()))
        return response

    # Override requested method if magic method=??? parameter exists
    method_to_use = request.method
    if request.POST and "method" in request.POST:
        method_to_use = request.POST["method"]

    if method_to_use in supported_methods:
        entry = supported_methods[method_to_use]
        if isinstance(entry, tuple):
            target_function, view_flags = entry
        else:
            target_function = supported_methods[method_to_use]
            view_flags = set()

        # Set request_notes.query for update_activity_user(), which is called
        # by some of the later wrappers.
        request_notes.query = target_function.__name__

        # We want to support authentication by both cookies (web client)
        # and API keys (API clients).  In the former case, we want to
        # do a check to ensure that CSRF etc is honored, but in the latter
        # we can skip all of that.
        #
        # Security implications of this portion of the code are minimal,
        # as we should worst-case fail closed if we miscategorise a request.

        # for some special views (e.g. serving a file that has been
        # uploaded), we support using the same URL for web and API clients.
        if (
            "override_api_url_scheme" in view_flags
            and request.META.get("HTTP_AUTHORIZATION", None) is not None
        ):
            # This request uses standard API based authentication.
            # For override_api_url_scheme views, we skip our normal
            # rate limiting, because there are good reasons clients
            # might need to (e.g.) request a large number of uploaded
            # files or avatars in quick succession.
            target_function = authenticated_rest_api_view(skip_rate_limiting=True)(target_function)
        elif "override_api_url_scheme" in view_flags and request.GET.get("api_key") is not None:
            # This request uses legacy API authentication.  We
            # unfortunately need that in the React Native mobile apps,
            # because there's no way to set HTTP_AUTHORIZATION in
            # React Native.  See last block for rate limiting notes.
            target_function = authenticated_uploads_api_view(skip_rate_limiting=True)(
                target_function
            )
        # /json views (web client) validate with a session token (cookie)
        elif not request.path.startswith("/api") and request.user.is_authenticated:
            # Authenticated via sessions framework, only CSRF check needed
            auth_kwargs = {}
            if "override_api_url_scheme" in view_flags:
                auth_kwargs["skip_rate_limiting"] = True
            target_function = csrf_protect(authenticated_json_view(target_function, **auth_kwargs))

        # most clients (mobile, bots, etc) use HTTP basic auth and REST calls, where instead of
        # username:password, we use email:apiKey
        elif request.META.get("HTTP_AUTHORIZATION", None):
            # Wrap function with decorator to authenticate the user before
            # proceeding
            target_function = authenticated_rest_api_view(
                allow_webhook_access="allow_incoming_webhooks" in view_flags,
            )(target_function)
        elif request.path.startswith("/json") and "allow_anonymous_user_web" in view_flags:
            # For endpoints that support anonymous web access, we do that.
            # TODO: Allow /api calls when this is stable enough.
            auth_kwargs = dict(allow_unauthenticated=True)
            target_function = csrf_protect(authenticated_json_view(target_function, **auth_kwargs))
        else:
            # Otherwise, throw an authentication error; our middleware
            # will generate the appropriate HTTP response.
            raise MissingAuthenticationError()

        if request.method not in ["GET", "POST"]:
            # process_as_post needs to be the outer decorator, because
            # otherwise we might access and thus cache a value for
            # request.REQUEST.
            target_function = process_as_post(target_function)

        return target_function(request, **kwargs)

    return json_method_not_allowed(list(supported_methods.keys()))
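
# An illustrative sketch of the dispatch flow above (view names are
# hypothetical, not real Zulip endpoints): a URL registered as
#     rest_path("messages/<int:message_id>", GET=get_message, PATCH=update_message)
# delivers a PATCH request to rest_dispatch with
#     kwargs == {"GET": get_message, "PATCH": update_message, "message_id": 123}
# The HTTP-method keys are moved into supported_methods, so the remaining
# kwargs ({"message_id": 123}) are passed through to the wrapped view, i.e.
# update_message(request, message_id=123), after the appropriate
# authentication decorator and process_as_post have been applied.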


def rest_path(
    route: str,
    kwargs: Mapping[str, object] = {},
    **handlers: Union[Callable[..., HttpResponse], Tuple[Callable[..., HttpResponse], Set[str]]],
) -> URLPattern:
    return path(route, rest_dispatch, {**kwargs, **handlers})
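

# Illustrative usage, as one might write it in a urls.py file (the view names
# here are hypothetical; the tuple form is the Tuple[view, Set[str]] variant
# accepted above, which attaches view flags such as "override_api_url_scheme"
# that rest_dispatch consumes):
#
#     urlpatterns = [
#         rest_path("users/me", GET=get_profile, PATCH=update_profile),
#         rest_path(
#             "user_uploads/<path:filename>",
#             GET=(serve_uploaded_file, {"override_api_url_scheme"}),
#         ),
#     ]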