zulip/zerver/middleware.py

import cProfile
import logging
import tempfile
import time
import traceback
from typing import Any, Callable, Dict, List, MutableMapping, Optional, Tuple
from urllib.parse import urlencode, urljoin
from django.conf import settings
from django.conf.urls.i18n import is_language_prefix_patterns_used
from django.db import connection
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.middleware.locale import LocaleMiddleware as DjangoLocaleMiddleware
from django.shortcuts import render
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.crypto import constant_time_compare
from django.utils.deprecation import MiddlewareMixin
from django.utils.log import log_response
from django.utils.translation import gettext as _
from django.views.csrf import csrf_failure as html_csrf_failure
from django_scim.middleware import SCIMAuthCheckMiddleware
from django_scim.settings import scim_settings
from sentry_sdk import capture_exception, set_tag
from sentry_sdk.integrations.logging import ignore_logger
from typing_extensions import Concatenate, ParamSpec
from zerver.lib.cache import get_remote_cache_requests, get_remote_cache_time
from zerver.lib.db import reset_queries
from zerver.lib.debug import maybe_tracemalloc_listen
from zerver.lib.exceptions import ErrorCode, JsonableError, MissingAuthenticationError
from zerver.lib.html_to_text import get_content_description
from zerver.lib.markdown import get_markdown_requests, get_markdown_time
from zerver.lib.per_request_cache import flush_per_request_caches
from zerver.lib.rate_limiter import RateLimitResult
from zerver.lib.request import REQ, RequestNotes, has_request_variables
from zerver.lib.response import (
AsynchronousResponse,
json_response,
json_response_from_error,
json_unauthorized,
)
from zerver.lib.subdomains import get_subdomain
from zerver.lib.user_agent import parse_user_agent
from zerver.models import Realm, get_realm
ParamT = ParamSpec("ParamT")
logger = logging.getLogger("zulip.requests")
slow_query_logger = logging.getLogger("zulip.slow_queries")
def record_request_stop_data(log_data: MutableMapping[str, Any]) -> None:
log_data["time_stopped"] = time.time()
log_data["remote_cache_time_stopped"] = get_remote_cache_time()
log_data["remote_cache_requests_stopped"] = get_remote_cache_requests()
log_data["markdown_time_stopped"] = get_markdown_time()
log_data["markdown_requests_stopped"] = get_markdown_requests()
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"].disable()
def async_request_timer_stop(request: HttpRequest) -> None:
log_data = RequestNotes.get_notes(request).log_data
assert log_data is not None
record_request_stop_data(log_data)
def record_request_restart_data(log_data: MutableMapping[str, Any]) -> None:
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"].enable()
log_data["time_restarted"] = time.time()
log_data["remote_cache_time_restarted"] = get_remote_cache_time()
log_data["remote_cache_requests_restarted"] = get_remote_cache_requests()
log_data["markdown_time_restarted"] = get_markdown_time()
log_data["markdown_requests_restarted"] = get_markdown_requests()
def async_request_timer_restart(request: HttpRequest) -> None:
log_data = RequestNotes.get_notes(request).log_data
assert log_data is not None
if "time_restarted" in log_data:
# Don't destroy data when being called from
# finish_current_handler
return
record_request_restart_data(log_data)
def record_request_start_data(log_data: MutableMapping[str, Any]) -> None:
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"] = cProfile.Profile()
log_data["prof"].enable()
reset_queries()
log_data["time_started"] = time.time()
log_data["remote_cache_time_start"] = get_remote_cache_time()
log_data["remote_cache_requests_start"] = get_remote_cache_requests()
log_data["markdown_time_start"] = get_markdown_time()
log_data["markdown_requests_start"] = get_markdown_requests()
def timedelta_ms(timedelta: float) -> float:
return timedelta * 1000
def format_timedelta(timedelta: float) -> str:
if timedelta >= 1:
return f"{timedelta:.1f}s"
return f"{timedelta_ms(timedelta):.0f}ms"
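# Illustrative examples (not from the original): format_timedelta(0.042)
# returns "42ms" and format_timedelta(1.87) returns "1.9s"; durations under
# one second are shown in milliseconds, longer ones in seconds to one decimal.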
def is_slow_query(time_delta: float, path: str) -> bool:
if time_delta < 1.2:
return False
is_exempt = path == "/activity" or path.startswith(("/realm_activity/", "/user_activity/"))
if is_exempt:
return time_delta >= 5
if "webathena_kerberos" in path:
return time_delta >= 10
return True
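# Summary of the thresholds above, for reference: most requests count as slow
# at >= 1.2s; /activity and the /realm_activity/ and /user_activity/ report
# pages only at >= 5s; and paths containing webathena_kerberos only at >= 10s.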
def write_log_line(
log_data: MutableMapping[str, Any],
path: str,
method: str,
remote_ip: str,
requester_for_logs: str,
client_name: str,
client_version: Optional[str] = None,
status_code: int = 200,
error_content: Optional[bytes] = None,
) -> None:
time_delta = -1
# A time duration of -1 means the LogRequests middleware
# didn't run for some reason
optional_orig_delta = ""
if "time_started" in log_data:
time_delta = time.time() - log_data["time_started"]
if "time_stopped" in log_data:
orig_time_delta = time_delta
time_delta = (log_data["time_stopped"] - log_data["time_started"]) + (
time.time() - log_data["time_restarted"]
)
optional_orig_delta = f" (lp: {format_timedelta(orig_time_delta)})"
remote_cache_output = ""
if "remote_cache_time_start" in log_data:
remote_cache_time_delta = get_remote_cache_time() - log_data["remote_cache_time_start"]
remote_cache_count_delta = (
get_remote_cache_requests() - log_data["remote_cache_requests_start"]
)
if "remote_cache_requests_stopped" in log_data:
# (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
remote_cache_time_delta += (
log_data["remote_cache_time_stopped"] - log_data["remote_cache_time_restarted"]
)
remote_cache_count_delta += (
log_data["remote_cache_requests_stopped"]
- log_data["remote_cache_requests_restarted"]
)
if remote_cache_time_delta > 0.005:
remote_cache_output = (
f" (mem: {format_timedelta(remote_cache_time_delta)}/{remote_cache_count_delta})"
)
startup_output = ""
if "startup_time_delta" in log_data and log_data["startup_time_delta"] > 0.005:
startup_output = " (+start: {})".format(format_timedelta(log_data["startup_time_delta"]))
markdown_output = ""
if "markdown_time_start" in log_data:
markdown_time_delta = get_markdown_time() - log_data["markdown_time_start"]
markdown_count_delta = get_markdown_requests() - log_data["markdown_requests_start"]
if "markdown_requests_stopped" in log_data:
# (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
markdown_time_delta += (
log_data["markdown_time_stopped"] - log_data["markdown_time_restarted"]
)
markdown_count_delta += (
log_data["markdown_requests_stopped"] - log_data["markdown_requests_restarted"]
)
if markdown_time_delta > 0.005:
markdown_output = (
f" (md: {format_timedelta(markdown_time_delta)}/{markdown_count_delta})"
)
# Get the amount of time spent doing database queries
db_time_output = ""
queries = connection.connection.queries if connection.connection is not None else []
if len(queries) > 0:
query_time = sum(float(query.get("time", 0)) for query in queries)
db_time_output = f" (db: {format_timedelta(query_time)}/{len(queries)}q)"
if "extra" in log_data:
extra_request_data = " {}".format(log_data["extra"])
else:
extra_request_data = ""
if client_version is None:
logger_client = f"({requester_for_logs} via {client_name})"
else:
logger_client = f"({requester_for_logs} via {client_name}/{client_version})"
logger_timing = f"{format_timedelta(time_delta):>5}{optional_orig_delta}{remote_cache_output}{markdown_output}{db_time_output}{startup_output} {path}"
logger_line = f"{remote_ip:<15} {method:<7} {status_code:3} {logger_timing}{extra_request_data} {logger_client}"
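# Illustrative example of a resulting log line (hypothetical values):
#   10.2.3.4        GET     200   45ms (db: 12ms/3q) /json/messages (<requester> via website)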
if status_code in [200, 304] and method == "GET" and path.startswith("/static"):
logger.debug(logger_line)
else:
logger.info(logger_line)
if is_slow_query(time_delta, path):
slow_query_logger.info(logger_line)
if settings.PROFILE_ALL_REQUESTS:
log_data["prof"].disable()
with tempfile.NamedTemporaryFile(
prefix="profile.data.{}.{}.".format(path.split("/")[-1], int(time_delta * 1000)),
delete=False,
) as stats_file:
log_data["prof"].dump_stats(stats_file.name)
# Log some additional data whenever we return certain 40x errors
if 400 <= status_code < 500 and status_code not in [401, 404, 405]:
error_data = repr(error_content)
if len(error_data) > 200:
error_data = "[content more than 200 characters]"
logger.info("status=%3d, data=%s, uid=%s", status_code, error_data, requester_for_logs)
# We take advantage of `has_request_variables` being called multiple times
# when processing a request in order to process any `client` parameter that
# may have been sent in the request content.
@has_request_variables
def parse_client(
request: HttpRequest,
# As `client` is a common element to all API endpoints, we choose
# not to document on every endpoint's individual parameters.
req_client: Optional[str] = REQ("client", default=None, intentionally_undocumented=True),
) -> Tuple[str, Optional[str]]:
# If the API request specified a client in the request content,
# that has priority. Otherwise, extract the client from the
# USER_AGENT.
if req_client is not None:
return req_client, None
if "User-Agent" in request.headers:
user_agent: Optional[Dict[str, str]] = parse_user_agent(request.headers["User-Agent"])
else:
user_agent = None
if user_agent is None:
# In the future, we will require setting USER_AGENT, but for
# now we just want to tag these requests so we can review them
# in logs and figure out the extent of the problem
return "Unspecified", None
client_name = user_agent["name"]
if client_name.startswith("Zulip"):
return client_name, user_agent.get("version")
# We could show browser versions in logs, and it'd probably be a
# good idea, but the current parsing will just get you Mozilla/5.0.
#
# Fixing this probably means using a third-party library, and
# making sure it's fast enough that we're happy to do it even on
# hot-path cases.
return client_name, None
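# Illustrative examples (assumed parse_user_agent behavior, not from the
# original): a User-Agent of "ZulipMobile/27.192 (Android 10)" would be
# expected to yield ("ZulipMobile", "27.192"), while a typical browser
# User-Agent parses to a generic name like "Mozilla" with no version
# returned (see the note above).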
class LogRequests(MiddlewareMixin):
# We primarily are doing logging using the process_view hook, but
# for some views, process_view isn't run, so we call the start
# method here too
def process_request(self, request: HttpRequest) -> None:
maybe_tracemalloc_listen()
request_notes = RequestNotes.get_notes(request)
if request_notes.log_data is not None:
# Sanity check to ensure this is being called from the
# Tornado code path that returns responses asynchronously.
assert request_notes.saved_response is not None
# Avoid re-initializing request_notes.log_data if it's already there.
return
try:
request_notes.client_name, request_notes.client_version = parse_client(request)
except JsonableError as e:
logging.exception(e)
request_notes.client_name = "Unparsable"
request_notes.client_version = None
set_tag("client", request_notes.client_name)
request_notes.log_data = {}
record_request_start_data(request_notes.log_data)
def process_view(
self,
request: HttpRequest,
view_func: Callable[Concatenate[HttpRequest, ParamT], HttpResponseBase],
args: List[object],
kwargs: Dict[str, Any],
) -> None:
request_notes = RequestNotes.get_notes(request)
if request_notes.saved_response is not None:
# The below logging adjustments are unnecessary (because
# we've already imported everything) and incorrect
# (because they'll overwrite data from pre-long-poll
# request processing) when returning a saved response.
return
# process_request was already run; we save the initialization
# time (i.e. the time between receiving the request and
# figuring out which view function to call, which is primarily
# importing modules on the first start)
assert request_notes.log_data is not None
request_notes.log_data["startup_time_delta"] = (
time.time() - request_notes.log_data["time_started"]
)
# And then completely reset our tracking to only cover work
# done as part of this request
record_request_start_data(request_notes.log_data)
def process_response(
self, request: HttpRequest, response: HttpResponseBase
) -> HttpResponseBase:
if isinstance(response, AsynchronousResponse):
# This special AsynchronousResponse sentinel is
# discarded after going through this code path as Tornado
# intends to block, so we stop here to avoid unnecessary work.
return response
remote_ip = request.META["REMOTE_ADDR"]
# Get the requester's identifier and client, if available.
request_notes = RequestNotes.get_notes(request)
requester_for_logs = request_notes.requester_for_logs
if requester_for_logs is None:
if request_notes.remote_server is not None:
requester_for_logs = request_notes.remote_server.format_requester_for_logs()
elif request.user.is_authenticated:
requester_for_logs = request.user.format_requester_for_logs()
else:
requester_for_logs = "unauth@{}".format(get_subdomain(request) or "root")
content = response.content if isinstance(response, HttpResponse) else None
assert request_notes.client_name is not None and request_notes.log_data is not None
assert request.method is not None
write_log_line(
request_notes.log_data,
request.path,
request.method,
remote_ip,
requester_for_logs,
request_notes.client_name,
client_version=request_notes.client_version,
status_code=response.status_code,
error_content=content,
)
return response
class JsonErrorHandler(MiddlewareMixin):
def __init__(self, get_response: Callable[[HttpRequest], HttpResponseBase]) -> None:
super().__init__(get_response)
ignore_logger("zerver.middleware.json_error_handler")
def process_exception(
self, request: HttpRequest, exception: Exception
) -> Optional[HttpResponse]:
if isinstance(exception, MissingAuthenticationError):
if "text/html" in request.headers.get("Accept", ""):
# If this looks like a request from a top-level page in a
# browser, send the user to the login page.
#
# TODO: The next part is a bit questionable; it will
# execute the likely intent when someone deliberately visits
# an API endpoint without authentication in a browser,
# but that is unlikely to be done intentionally very often.
return HttpResponseRedirect(
f"{settings.HOME_NOT_LOGGED_IN}?{urlencode({'next': request.path})}"
)
if request.path.startswith("/api"):
# For API routes, ask for HTTP basic auth (email:apiKey).
return json_unauthorized()
else:
# For /json routes, ask for session authentication.
return json_unauthorized(www_authenticate="session")
if isinstance(exception, JsonableError):
response = json_response_from_error(exception)
if response.status_code >= 500:
# Here we use Django's log_response the way Django uses
# it normally to log error responses. However, we make the small
# modification of including the traceback to make the log message
# more helpful. log_response takes care of knowing not to duplicate
# the logging, so Django won't generate a second log message.
log_response(
"%s: %s",
response.reason_phrase,
request.path,
response=response,
request=request,
exception=exception,
)
return response
if RequestNotes.get_notes(request).error_format == "JSON" and not settings.TEST_SUITE:
capture_exception(exception)
json_error_logger = logging.getLogger("zerver.middleware.json_error_handler")
json_error_logger.error(traceback.format_exc(), extra=dict(request=request))
return json_response(res_type="error", msg=_("Internal server error"), status=500)
return None
class TagRequests(MiddlewareMixin):
def process_view(
self,
request: HttpRequest,
view_func: Callable[Concatenate[HttpRequest, ParamT], HttpResponseBase],
args: List[object],
kwargs: Dict[str, Any],
) -> None:
self.process_request(request)
def process_request(self, request: HttpRequest) -> None:
if request.path.startswith("/api/") or request.path.startswith("/json/"):
RequestNotes.get_notes(request).error_format = "JSON"
else:
RequestNotes.get_notes(request).error_format = "HTML"
class CsrfFailureError(JsonableError):
http_status_code = 403
code = ErrorCode.CSRF_FAILED
data_fields = ["reason"]
def __init__(self, reason: str) -> None:
self.reason: str = reason
@staticmethod
def msg_format() -> str:
return _("CSRF error: {reason}")
def csrf_failure(request: HttpRequest, reason: str = "") -> HttpResponse:
if RequestNotes.get_notes(request).error_format == "JSON":
return json_response_from_error(CsrfFailureError(reason))
else:
return html_csrf_failure(request, reason)
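# In other words, API/JSON clients receive a machine-readable JSON error body
# for CSRF failures, while browser page requests get Django's standard HTML
# CSRF failure page.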
class LocaleMiddleware(DjangoLocaleMiddleware):
def process_response(
self, request: HttpRequest, response: HttpResponseBase
) -> HttpResponseBase:
# This is the same as the default LocaleMiddleware, minus the
# logic that redirects 404's that lack a prefixed language in
# the path into having a language. See
# https://code.djangoproject.com/ticket/32005
language = translation.get_language()
language_from_path = translation.get_language_from_path(request.path_info)
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
i18n_patterns_used, _ = is_language_prefix_patterns_used(urlconf)
if not (i18n_patterns_used and language_from_path):
patch_vary_headers(response, ("Accept-Language",))
assert language is not None
response.setdefault("Content-Language", language)
# An additional responsibility of our override of this middleware is to save the user's language
# preference in a cookie. That determination is made by code handling the request
# and saved in the set_language flag so that it can be used here.
set_language = RequestNotes.get_notes(request).set_language
if set_language is not None:
response.set_cookie(
settings.LANGUAGE_COOKIE_NAME,
set_language,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN,
secure=settings.LANGUAGE_COOKIE_SECURE,
httponly=settings.LANGUAGE_COOKIE_HTTPONLY,
samesite=settings.LANGUAGE_COOKIE_SAMESITE,
)
return response
class RateLimitMiddleware(MiddlewareMixin):
def set_response_headers(
self, response: HttpResponseBase, rate_limit_results: List[RateLimitResult]
) -> None:
# The limit on the action that was requested is the minimum of the limits that get applied:
limit = min(result.entity.max_api_calls() for result in rate_limit_results)
response["X-RateLimit-Limit"] = str(limit)
# Same principle applies to remaining API calls:
remaining_api_calls = min(result.remaining for result in rate_limit_results)
response["X-RateLimit-Remaining"] = str(remaining_api_calls)
# The full reset time is the maximum of the reset times for the limits that get applied:
reset_time = time.time() + max(result.secs_to_freedom for result in rate_limit_results)
response["X-RateLimit-Reset"] = str(int(reset_time))
def process_response(
self, request: HttpRequest, response: HttpResponseBase
) -> HttpResponseBase:
if not settings.RATE_LIMITING:
return response
# Add X-RateLimit-*** headers
ratelimits_applied = RequestNotes.get_notes(request).ratelimits_applied
if len(ratelimits_applied) > 0:
self.set_response_headers(response, ratelimits_applied)
return response
class FlushDisplayRecipientCache(MiddlewareMixin):
def process_response(
self, request: HttpRequest, response: HttpResponseBase
) -> HttpResponseBase:
# We flush the per-request caches after every request, so they
# are not shared at all between requests.
flush_per_request_caches()
return response
class HostDomainMiddleware(MiddlewareMixin):
def process_request(self, request: HttpRequest) -> Optional[HttpResponse]:
# Match against ALLOWED_HOSTS, which is rather permissive;
# failure will raise DisallowedHost, which is a 400.
request.get_host()
# This check is important to avoid doing the extra work of
# `get_realm` (which does a database query that could be
# problematic for Tornado). Also the error page below is only
# appropriate for a page visited in a browser, not the API.
#
# API authentication will end up checking for an invalid
# realm, and throw a JSON-format error if appropriate.
if request.path.startswith(("/static/", "/api/", "/json/")):
return None
subdomain = get_subdomain(request)
if subdomain == settings.SOCIAL_AUTH_SUBDOMAIN:
# Realms are not supposed to exist on SOCIAL_AUTH_SUBDOMAIN.
return None
request_notes = RequestNotes.get_notes(request)
try:
request_notes.realm = get_realm(subdomain)
request_notes.has_fetched_realm = True
except Realm.DoesNotExist:
if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
# The root domain is used for creating new
# organizations even if it does not host a realm.
return None
return render(request, "zerver/invalid_realm.html", status=404)
set_tag("realm", request_notes.realm.string_id)
# Check that we're not using the non-canonical form of a REALM_HOSTS subdomain
if subdomain in settings.REALM_HOSTS:
host = request.get_host().lower()
formal_host = request_notes.realm.host
if host != formal_host and not host.startswith(formal_host + ":"):
return HttpResponseRedirect(
urljoin(request_notes.realm.uri, request.get_full_path())
)
return None
class SetRemoteAddrFromRealIpHeader(MiddlewareMixin):
"""Middleware that sets REMOTE_ADDR based on the X-Real-Ip header.
This middleware is similar to Django's old
SetRemoteAddrFromForwardedFor middleware. We use X-Real-Ip, and
not X-Forwarded-For, because the latter is a list of proxies, some
number of which are trusted by us, and some of which could be
arbitrarily set by the user. nginx has already parsed which are
which, and has set X-Real-Ip to the first one, going right to
left, which is untrusted.
Since we are always deployed behind nginx, we can trust the
X-Real-Ip which is so set. In development, we fall back to the
REMOTE_ADDR supplied by the server.
"""
def process_request(self, request: HttpRequest) -> None:
try:
real_ip = request.headers["X-Real-IP"]
except KeyError:
pass
else:
request.META["REMOTE_ADDR"] = real_ip
class ProxyMisconfigurationError(JsonableError):
http_status_code = 500
data_fields = ["proxy_reason"]
def __init__(self, proxy_reason: str) -> None:
self.proxy_reason = proxy_reason
@staticmethod
def msg_format() -> str:
return _("Reverse proxy misconfiguration: {proxy_reason}")
class DetectProxyMisconfiguration(MiddlewareMixin):
def process_view(
self,
request: HttpRequest,
view_func: Callable[Concatenate[HttpRequest, ParamT], HttpResponseBase],
args: List[object],
kwargs: Dict[str, Any],
) -> None:
proxy_state_header = request.headers.get("X-Proxy-Misconfiguration", "")
# Our nginx configuration sets this header if:
# - there is an X-Forwarded-For set but no proxies configured in Zulip
# - proxies are configured but the request did not come from them
# - proxies are configured and the request came from them,
# but there was no X-Forwarded-Proto header
#
# Note that the first two may be false-positives. We only
# display the error if the request also came in over HTTP (and
# a trusted proxy didn't say they get it over HTTPS), which
# should be impossible because Zulip only supports external
# https:// URLs in production. nginx configuration ensures
# that request.is_secure() is only true if our nginx is
# serving the request over HTTPS, or it came from a trusted
# proxy which reports that it is doing so. This will result
# in false negatives if Zulip's nginx is serving responses
# over HTTPS to a proxy whose IP is not configured, or
# misconfigured, but we cannot distinguish this from a random
# client which is providing proxy headers to a correctly
# configured Zulip.
#
# There is a complication to the above logic -- we do expect
# that requests not through the proxy may happen from
# localhost over HTTP (e.g. the email gateway). Skip warnings
# if the remote IP is localhost.
if (
proxy_state_header != ""
and not request.is_secure()
and request.META["REMOTE_ADDR"] not in ("127.0.0.1", "::1")
):
raise ProxyMisconfigurationError(proxy_state_header)
def alter_content(request: HttpRequest, content: bytes) -> bytes:
first_paragraph_text = get_content_description(content, request)
placeholder_open_graph_description = RequestNotes.get_notes(
request
).placeholder_open_graph_description
assert placeholder_open_graph_description is not None
return content.replace(
placeholder_open_graph_description.encode(),
first_paragraph_text.encode(),
)
class FinalizeOpenGraphDescription(MiddlewareMixin):
def process_response(
self, request: HttpRequest, response: HttpResponseBase
) -> HttpResponseBase:
if RequestNotes.get_notes(request).placeholder_open_graph_description is not None:
assert isinstance(response, HttpResponse)
response.content = alter_content(request, response.content)
return response
def validate_scim_bearer_token(request: HttpRequest) -> bool:
"""
This function verifies the request is allowed to make SCIM requests on this subdomain,
by checking the provided bearer token and ensuring it matches a scim client configured
for this subdomain in settings.SCIM_CONFIG.
Returns True if successful.
"""
subdomain = get_subdomain(request)
scim_config_dict = settings.SCIM_CONFIG.get(subdomain)
if not scim_config_dict:
return False
valid_bearer_token = scim_config_dict.get("bearer_token")
scim_client_name = scim_config_dict.get("scim_client_name")
# We really don't want a misconfiguration where this is unset,
# allowing free access to the SCIM API:
assert valid_bearer_token
assert scim_client_name
authorization = request.headers.get("Authorization")
if authorization is None or not constant_time_compare(
authorization, f"Bearer {valid_bearer_token}"
):
return False
request_notes = RequestNotes.get_notes(request)
assert request_notes.realm is not None
request_notes.requester_for_logs = (
f"scim-client:{scim_client_name}:realm:{request_notes.realm.id}"
)
return True
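# Illustrative shape of the SCIM_CONFIG entry this check assumes (hypothetical
# values, not from the original):
# SCIM_CONFIG = {
#     "subdomain-name": {
#         "bearer_token": "<long random secret>",
#         "scim_client_name": "okta",
#     },
# }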
class ZulipSCIMAuthCheckMiddleware(SCIMAuthCheckMiddleware):
"""
Overridden version of middleware implemented in django-scim2
(https://github.com/15five/django-scim2/blob/master/src/django_scim/middleware.py)
to also handle authenticating the client.
This doesn't actually function as a regular middleware class that's registered in
settings.MIDDLEWARE, but rather is called inside django-scim2 logic to authenticate
the request when accessing SCIM endpoints.
"""
def process_request(self, request: HttpRequest) -> Optional[HttpResponse]:
# Defensive assertion to ensure this can't accidentally get called on a request
# to a non-SCIM endpoint.
assert request.path.startswith(self.reverse_url)
# This determines whether this is a SCIM request based on the request's
# path; if it is, it logs request information (including the body), as
# well as the response, to the `django_scim.middleware` logger at DEBUG
# level for debugging purposes. We keep those logs in /var/log/zulip/scim.log.
if self.should_log_request(request):
self.log_request(request)
if not validate_scim_bearer_token(request):
# In case of failed authentication, a response should be returned to
# prevent going further down the codepath (to the SCIM endpoint), since
# this aspect works like regular middleware.
response = HttpResponse(status=401)
response["WWW-Authenticate"] = scim_settings.WWW_AUTHENTICATE_HEADER
return response
return None
class ZulipNoopMiddleware(MiddlewareMixin):
pass