import cProfile
import logging
import time
import traceback
from typing import (
    Any,
    AnyStr,
    Callable,
    Dict,
    Iterable,
    Iterator,
    List,
    MutableMapping,
    Optional,
    Tuple,
)
from urllib.parse import urlencode

from django.conf import settings
from django.conf.urls.i18n import is_language_prefix_patterns_used
from django.db import connection
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, StreamingHttpResponse
from django.http.response import HttpResponseBase
from django.middleware.common import CommonMiddleware
from django.middleware.locale import LocaleMiddleware as DjangoLocaleMiddleware
from django.shortcuts import render
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.translation import gettext as _
from django.views.csrf import csrf_failure as html_csrf_failure
from django_scim.middleware import SCIMAuthCheckMiddleware
from django_scim.settings import scim_settings
from sentry_sdk import capture_exception
from sentry_sdk.integrations.logging import ignore_logger

from zerver.lib.cache import get_remote_cache_requests, get_remote_cache_time
from zerver.lib.db import reset_queries
from zerver.lib.debug import maybe_tracemalloc_listen
from zerver.lib.exceptions import ErrorCode, JsonableError, MissingAuthenticationError
from zerver.lib.html_to_text import get_content_description
from zerver.lib.markdown import get_markdown_requests, get_markdown_time
from zerver.lib.rate_limiter import RateLimitResult
from zerver.lib.request import REQ, RequestNotes, has_request_variables, set_request, unset_request
from zerver.lib.response import json_response, json_response_from_error, json_unauthorized
from zerver.lib.subdomains import get_subdomain
from zerver.lib.types import ViewFuncT
from zerver.lib.user_agent import parse_user_agent
from zerver.lib.utils import statsd
from zerver.models import Realm, SCIMClient, flush_per_request_caches, get_realm

logger = logging.getLogger("zulip.requests")
slow_query_logger = logging.getLogger("zulip.slow_queries")


def record_request_stop_data(log_data: MutableMapping[str, Any]) -> None:
    log_data["time_stopped"] = time.time()
    log_data["remote_cache_time_stopped"] = get_remote_cache_time()
    log_data["remote_cache_requests_stopped"] = get_remote_cache_requests()
    log_data["markdown_time_stopped"] = get_markdown_time()
    log_data["markdown_requests_stopped"] = get_markdown_requests()
    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"].disable()


def async_request_timer_stop(request: HttpRequest) -> None:
    log_data = RequestNotes.get_notes(request).log_data
    assert log_data is not None
    record_request_stop_data(log_data)


def record_request_restart_data(log_data: MutableMapping[str, Any]) -> None:
    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"].enable()
    log_data["time_restarted"] = time.time()
    log_data["remote_cache_time_restarted"] = get_remote_cache_time()
    log_data["remote_cache_requests_restarted"] = get_remote_cache_requests()
    log_data["markdown_time_restarted"] = get_markdown_time()
    log_data["markdown_requests_restarted"] = get_markdown_requests()


def async_request_timer_restart(request: HttpRequest) -> None:
    log_data = RequestNotes.get_notes(request).log_data
    assert log_data is not None
    if "time_restarted" in log_data:
        # Don't destroy data when being called from
        # finish_current_handler
        return
    record_request_restart_data(log_data)


def record_request_start_data(log_data: MutableMapping[str, Any]) -> None:
    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"] = cProfile.Profile()
        log_data["prof"].enable()

    reset_queries()
    log_data["time_started"] = time.time()
    log_data["remote_cache_time_start"] = get_remote_cache_time()
    log_data["remote_cache_requests_start"] = get_remote_cache_requests()
    log_data["markdown_time_start"] = get_markdown_time()
    log_data["markdown_requests_start"] = get_markdown_requests()


def timedelta_ms(timedelta: float) -> float:
    return timedelta * 1000


def format_timedelta(timedelta: float) -> str:
    if timedelta >= 1:
        return f"{timedelta:.1f}s"
    return f"{timedelta_ms(timedelta):.0f}ms"
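
# For illustration, durations under one second are rendered in whole
# milliseconds and longer ones in seconds (hypothetical values):
#   format_timedelta(0.0423) -> "42ms"
#   format_timedelta(2.34) -> "2.3s"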


def is_slow_query(time_delta: float, path: str) -> bool:
    if time_delta < 1.2:
        return False
    is_exempt = (
        path in ["/activity", "/json/report/error", "/api/v1/deployments/report_error"]
        or path.startswith("/realm_activity/")
        or path.startswith("/user_activity/")
    )
    if is_exempt:
        return time_delta >= 5
    if "webathena_kerberos" in path:
        return time_delta >= 10
    return True
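
# Illustrative thresholds (hypothetical paths and timings):
#   is_slow_query(1.5, "/api/v1/messages") -> True   (default 1.2s threshold)
#   is_slow_query(3.0, "/activity") -> False  (exempt paths use a 5s threshold)
#   is_slow_query(8.0, "/webathena_kerberos/login") -> False  (10s threshold)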


statsd_blacklisted_requests = [
    "do_confirm",
    "signup_send_confirm",
    "new_realm_send_confirm",
    "eventslast_event_id",
    "webreq.content",
    "avatar",
    "user_uploads",
    "password.reset",
    "static",
    "json.bots",
    "json.users",
    "json.streams",
    "accounts.unsubscribe",
    "apple-touch-icon",
    "emoji",
    "upload_file",
    "realm_activity",
    "user_activity",
]


def write_log_line(
    log_data: MutableMapping[str, Any],
    path: str,
    method: str,
    remote_ip: str,
    requestor_for_logs: str,
    client_name: str,
    client_version: Optional[str] = None,
    status_code: int = 200,
    error_content: Optional[AnyStr] = None,
    error_content_iter: Optional[Iterable[AnyStr]] = None,
) -> None:
    assert error_content is None or error_content_iter is None
    if error_content is not None:
        error_content_iter = (error_content,)

    if settings.STATSD_HOST != "":
        # For statsd timer name
        if path == "/":
            statsd_path = "webreq"
        else:
            statsd_path = "webreq.{}".format(path[1:].replace("/", "."))
            # Remove non-ascii chars from path (there should be none; if there are, it's
            # because someone manually entered a nonexistent path), as UTF-8 chars make
            # statsd sad when it sends the key name over the socket
            statsd_path = statsd_path.encode("ascii", errors="ignore").decode("ascii")
        # TODO: This could probably be optimized to use a regular expression rather than a loop.
        suppress_statsd = any(
            blacklisted in statsd_path for blacklisted in statsd_blacklisted_requests
        )
    else:
        suppress_statsd = True
        statsd_path = ""

    time_delta = -1
    # A time duration of -1 means the StartLogRequests middleware
    # didn't run for some reason
    optional_orig_delta = ""
    if "time_started" in log_data:
        time_delta = time.time() - log_data["time_started"]
    if "time_stopped" in log_data:
        orig_time_delta = time_delta
        time_delta = (log_data["time_stopped"] - log_data["time_started"]) + (
            time.time() - log_data["time_restarted"]
        )
        optional_orig_delta = f" (lp: {format_timedelta(orig_time_delta)})"
    remote_cache_output = ""
    if "remote_cache_time_start" in log_data:
        remote_cache_time_delta = get_remote_cache_time() - log_data["remote_cache_time_start"]
        remote_cache_count_delta = (
            get_remote_cache_requests() - log_data["remote_cache_requests_start"]
        )
        if "remote_cache_requests_stopped" in log_data:
            # (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
            remote_cache_time_delta += (
                log_data["remote_cache_time_stopped"] - log_data["remote_cache_time_restarted"]
            )
            remote_cache_count_delta += (
                log_data["remote_cache_requests_stopped"]
                - log_data["remote_cache_requests_restarted"]
            )

        if remote_cache_time_delta > 0.005:
            remote_cache_output = (
                f" (mem: {format_timedelta(remote_cache_time_delta)}/{remote_cache_count_delta})"
            )

        if not suppress_statsd:
            statsd.timing(f"{statsd_path}.remote_cache.time", timedelta_ms(remote_cache_time_delta))
            statsd.incr(f"{statsd_path}.remote_cache.querycount", remote_cache_count_delta)

    startup_output = ""
    if "startup_time_delta" in log_data and log_data["startup_time_delta"] > 0.005:
        startup_output = " (+start: {})".format(format_timedelta(log_data["startup_time_delta"]))

    markdown_output = ""
    if "markdown_time_start" in log_data:
        markdown_time_delta = get_markdown_time() - log_data["markdown_time_start"]
        markdown_count_delta = get_markdown_requests() - log_data["markdown_requests_start"]
        if "markdown_requests_stopped" in log_data:
            # (now - restarted) + (stopped - start) = (now - start) + (stopped - restarted)
            markdown_time_delta += (
                log_data["markdown_time_stopped"] - log_data["markdown_time_restarted"]
            )
            markdown_count_delta += (
                log_data["markdown_requests_stopped"] - log_data["markdown_requests_restarted"]
            )

        if markdown_time_delta > 0.005:
            markdown_output = (
                f" (md: {format_timedelta(markdown_time_delta)}/{markdown_count_delta})"
            )

        if not suppress_statsd:
            statsd.timing(f"{statsd_path}.markdown.time", timedelta_ms(markdown_time_delta))
            statsd.incr(f"{statsd_path}.markdown.count", markdown_count_delta)

    # Get the amount of time spent doing database queries
    db_time_output = ""
    queries = connection.connection.queries if connection.connection is not None else []
    if len(queries) > 0:
        query_time = sum(float(query.get("time", 0)) for query in queries)
        db_time_output = f" (db: {format_timedelta(query_time)}/{len(queries)}q)"

        if not suppress_statsd:
            # Log ms, db ms, and num queries to statsd
            statsd.timing(f"{statsd_path}.dbtime", timedelta_ms(query_time))
            statsd.incr(f"{statsd_path}.dbq", len(queries))
            statsd.timing(f"{statsd_path}.total", timedelta_ms(time_delta))

    if "extra" in log_data:
        extra_request_data = " {}".format(log_data["extra"])
    else:
        extra_request_data = ""
    if client_version is None:
        logger_client = f"({requestor_for_logs} via {client_name})"
    else:
        logger_client = f"({requestor_for_logs} via {client_name}/{client_version})"
    logger_timing = f"{format_timedelta(time_delta):>5}{optional_orig_delta}{remote_cache_output}{markdown_output}{db_time_output}{startup_output} {path}"
    logger_line = f"{remote_ip:<15} {method:<7} {status_code:3} {logger_timing}{extra_request_data} {logger_client}"
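    # At this point `logger_line` reads roughly like this (hypothetical
    # values): "10.2.3.4        GET     200  45ms (db: 12ms/3q)
    # /api/v1/messages (user@example.com via ZulipMobile/27.189)".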
    if status_code in [200, 304] and method == "GET" and path.startswith("/static"):
        logger.debug(logger_line)
    else:
        logger.info(logger_line)

    if is_slow_query(time_delta, path):
        slow_query_logger.info(logger_line)

    if settings.PROFILE_ALL_REQUESTS:
        log_data["prof"].disable()
        profile_path = "/tmp/profile.data.{}.{}".format(path.split("/")[-1], int(time_delta * 1000))
        log_data["prof"].dump_stats(profile_path)

    # Log some additional data whenever we return certain 40x errors
    if 400 <= status_code < 500 and status_code not in [401, 404, 405]:
        assert error_content_iter is not None
        error_content_list = list(error_content_iter)
        if not error_content_list:
            error_data = ""
        elif isinstance(error_content_list[0], str):
            error_data = "".join(error_content_list)
        elif isinstance(error_content_list[0], bytes):
            error_data = repr(b"".join(error_content_list))
        if len(error_data) > 200:
            error_data = "[content more than 200 characters]"
        logger.info("status=%3d, data=%s, uid=%s", status_code, error_data, requestor_for_logs)


class RequestContext(MiddlewareMixin):
    def __call__(self, request: HttpRequest) -> HttpResponse:
        set_request(request)
        try:
            return self.get_response(request)
        finally:
            unset_request()


# We take advantage of `has_request_variables` being called multiple times
# when processing a request in order to process any `client` parameter that
# may have been sent in the request content.
@has_request_variables
def parse_client(
    request: HttpRequest,
    # As `client` is a common element of all API endpoints, we choose
    # not to document it among every endpoint's individual parameters.
    req_client: Optional[str] = REQ("client", default=None, intentionally_undocumented=True),
) -> Tuple[str, Optional[str]]:
    # If the API request specified a client in the request content,
    # that has priority. Otherwise, extract the client from the
    # User-Agent header.
    if req_client is not None:
        return req_client, None
    if "User-Agent" in request.headers:
        user_agent: Optional[Dict[str, str]] = parse_user_agent(request.headers["User-Agent"])
    else:
        user_agent = None
    if user_agent is None:
        # In the future, we will require setting the User-Agent header,
        # but for now we just want to tag these requests so we can review
        # them in logs and figure out the extent of the problem.
        return "Unspecified", None

    client_name = user_agent["name"]
    if client_name.startswith("Zulip"):
        return client_name, user_agent.get("version")

    # We could show browser versions in logs, and it'd probably be a
    # good idea, but the current parsing will just get you Mozilla/5.0.
    #
    # Fixing this probably means using a third-party library, and
    # making sure it's fast enough that we're happy to do it even on
    # hot-path cases.
    return client_name, None
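
# Illustrative results (hypothetical User-Agent values):
#   "ZulipMobile/27.189 (Android 10)" -> ("ZulipMobile", "27.189")
#   "Mozilla/5.0 (X11; Linux x86_64) ..." -> ("Mozilla", None)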


class LogRequests(MiddlewareMixin):
    # We primarily do our logging in the process_view hook, but for
    # some views, process_view isn't run, so we call the start method
    # here too.
    def process_request(self, request: HttpRequest) -> None:
        maybe_tracemalloc_listen()
        request_notes = RequestNotes.get_notes(request)

        if request_notes.log_data is not None:
            # Sanity check to ensure this is being called from the
            # Tornado code path that returns responses asynchronously.
            assert request_notes.saved_response is not None

            # Avoid re-initializing request_notes.log_data if it's already there.
            return

        try:
            request_notes.client_name, request_notes.client_version = parse_client(request)
        except JsonableError as e:
            logging.exception(e)
            request_notes.client_name = "Unparsable"
            request_notes.client_version = None

        request_notes.log_data = {}
        record_request_start_data(request_notes.log_data)

    def process_view(
        self,
        request: HttpRequest,
        view_func: ViewFuncT,
        args: List[str],
        kwargs: Dict[str, Any],
    ) -> None:
        request_notes = RequestNotes.get_notes(request)
        if request_notes.saved_response is not None:
            # The below logging adjustments are unnecessary (because
            # we've already imported everything) and incorrect
            # (because they'll overwrite data from pre-long-poll
            # request processing) when returning a saved response.
            return

        # process_request was already run; we save the initialization
        # time (i.e. the time between receiving the request and
        # figuring out which view function to call, which is primarily
        # importing modules on the first start)
        assert request_notes.log_data is not None
        request_notes.log_data["startup_time_delta"] = (
            time.time() - request_notes.log_data["time_started"]
        )
        # And then completely reset our tracking to only cover work
        # done as part of this request
        record_request_start_data(request_notes.log_data)

    def process_response(
        self, request: HttpRequest, response: HttpResponseBase
    ) -> HttpResponseBase:
        if getattr(response, "asynchronous", False):
            # This special Tornado "asynchronous" response is
            # discarded after going through this code path as Tornado
            # intends to block, so we stop here to avoid unnecessary work.
            return response

        remote_ip = request.META["REMOTE_ADDR"]

        # Get the requestor's identifier and client, if available.
        request_notes = RequestNotes.get_notes(request)
        requestor_for_logs = request_notes.requestor_for_logs
        if requestor_for_logs is None:
            # Note that request.user is a Union[RemoteZulipServer, UserProfile, AnonymousUser],
            # if it is present.
            if hasattr(request, "user") and hasattr(request.user, "format_requestor_for_logs"):
                requestor_for_logs = request.user.format_requestor_for_logs()
            else:
                requestor_for_logs = "unauth@{}".format(get_subdomain(request) or "root")

        if response.streaming:
            assert isinstance(response, StreamingHttpResponse)
            content_iter: Optional[Iterator[bytes]] = response.streaming_content
            content = None
        else:
            content = response.content
            content_iter = None

        assert request_notes.client_name is not None and request_notes.log_data is not None
        write_log_line(
            request_notes.log_data,
            request.path,
            request.method,
            remote_ip,
            requestor_for_logs,
            request_notes.client_name,
            client_version=request_notes.client_version,
            status_code=response.status_code,
            error_content=content,
            error_content_iter=content_iter,
        )
        return response


class JsonErrorHandler(MiddlewareMixin):
    def __init__(self, get_response: Callable[[HttpRequest], HttpResponse]) -> None:
        super().__init__(get_response)
        ignore_logger("zerver.middleware.json_error_handler")

    def process_exception(
        self, request: HttpRequest, exception: Exception
    ) -> Optional[HttpResponse]:
        if isinstance(exception, MissingAuthenticationError):
            if "text/html" in request.headers.get("Accept", ""):
                # If this looks like a request from a top-level page in a
                # browser, send the user to the login page.
                #
                # TODO: The next part is a bit questionable; it will
                # execute the likely intent for intentionally visiting
                # an API endpoint without authentication in a browser,
                # but that's unlikely to be done intentionally very often.
                return HttpResponseRedirect(
                    f"{settings.HOME_NOT_LOGGED_IN}?{urlencode({'next': request.path})}"
                )
            if request.path.startswith("/api"):
                # For API routes, ask for HTTP basic auth (email:apiKey).
                return json_unauthorized()
            else:
                # For /json routes, ask for session authentication.
                return json_unauthorized(www_authenticate="session")

        if isinstance(exception, JsonableError):
            return json_response_from_error(exception)
        if RequestNotes.get_notes(request).error_format == "JSON":
            capture_exception(exception)
            json_error_logger = logging.getLogger("zerver.middleware.json_error_handler")
            json_error_logger.error(traceback.format_exc(), extra=dict(request=request))
            return json_response(res_type="error", msg=_("Internal server error"), status=500)
        return None
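
# For illustration: with JsonErrorHandler installed, an unauthenticated
# request to a hypothetical /api/v1/messages endpoint gets a 401 JSON error
# with an HTTP basic auth challenge, while the same request to a /json/
# route is instead challenged for session authentication.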


class TagRequests(MiddlewareMixin):
    def process_view(
        self, request: HttpRequest, view_func: ViewFuncT, args: List[str], kwargs: Dict[str, Any]
    ) -> None:
        self.process_request(request)

    def process_request(self, request: HttpRequest) -> None:
        if request.path.startswith("/api/") or request.path.startswith("/json/"):
            RequestNotes.get_notes(request).error_format = "JSON"
        else:
            RequestNotes.get_notes(request).error_format = "HTML"


class CsrfFailureError(JsonableError):
    http_status_code = 403
    code = ErrorCode.CSRF_FAILED
    data_fields = ["reason"]

    def __init__(self, reason: str) -> None:
        self.reason: str = reason

    @staticmethod
    def msg_format() -> str:
        return _("CSRF error: {reason}")


def csrf_failure(request: HttpRequest, reason: str = "") -> HttpResponse:
    if RequestNotes.get_notes(request).error_format == "JSON":
        return json_response_from_error(CsrfFailureError(reason))
    else:
        return html_csrf_failure(request, reason)
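
# For illustration, a CSRF failure on a /json route yields a 403 JSON error
# shaped roughly like (hypothetical reason string):
#   {"result": "error", "code": "CSRF_FAILED", "msg": "CSRF error: CSRF cookie not set."}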


class LocaleMiddleware(DjangoLocaleMiddleware):
    def process_response(
        self, request: HttpRequest, response: HttpResponseBase
    ) -> HttpResponseBase:

        # This is the same as the default LocaleMiddleware, minus the
        # logic that redirects 404's that lack a prefixed language in
        # the path into having a language. See
        # https://code.djangoproject.com/ticket/32005
        language = translation.get_language()
        language_from_path = translation.get_language_from_path(request.path_info)
        urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
        i18n_patterns_used, _ = is_language_prefix_patterns_used(urlconf)
        if not (i18n_patterns_used and language_from_path):
            patch_vary_headers(response, ("Accept-Language",))
        assert language is not None
        response.setdefault("Content-Language", language)

        # An additional responsibility of our override of this middleware is
        # to save the user's language preference in a cookie. That
        # determination is made by code handling the request and saved in the
        # set_language flag so that it can be used here.
        set_language = RequestNotes.get_notes(request).set_language
        if set_language is not None:
            response.set_cookie(settings.LANGUAGE_COOKIE_NAME, set_language)

        return response


class RateLimitMiddleware(MiddlewareMixin):
    def set_response_headers(
        self, response: HttpResponse, rate_limit_results: List[RateLimitResult]
    ) -> None:
        # The limit on the action that was requested is the minimum of the limits that get applied:
        limit = min(result.entity.max_api_calls() for result in rate_limit_results)
        response["X-RateLimit-Limit"] = str(limit)
        # Same principle applies to remaining API calls:
        remaining_api_calls = min(result.remaining for result in rate_limit_results)
        response["X-RateLimit-Remaining"] = str(remaining_api_calls)

        # The full reset time is the maximum of the reset times for the limits that get applied:
        reset_time = time.time() + max(result.secs_to_freedom for result in rate_limit_results)
        response["X-RateLimit-Reset"] = str(int(reset_time))
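
        # For illustration, with a hypothetical limit of 200 calls and 50
        # calls remaining, the response would carry headers roughly like:
        #   X-RateLimit-Limit: 200
        #   X-RateLimit-Remaining: 50
        #   X-RateLimit-Reset: 1650000000  (Unix timestamp)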

    def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
        if not settings.RATE_LIMITING:
            return response

        # Add X-RateLimit-*** headers
        ratelimits_applied = RequestNotes.get_notes(request).ratelimits_applied
        if len(ratelimits_applied) > 0:
            self.set_response_headers(response, ratelimits_applied)

        return response


class FlushDisplayRecipientCache(MiddlewareMixin):
    def process_response(self, request: HttpRequest, response: HttpResponse) -> HttpResponse:
        # We flush the per-request caches after every request, so they
        # are not shared at all between requests.
        flush_per_request_caches()
        return response


class HostDomainMiddleware(MiddlewareMixin):
    def process_request(self, request: HttpRequest) -> Optional[HttpResponse]:
        # Match against ALLOWED_HOSTS, which is rather permissive;
        # failure will raise DisallowedHost, which is a 400.
        request.get_host()

        # This check is important to avoid doing the extra work of
        # `get_realm` (which does a database query that could be
        # problematic for Tornado). Also, the error page below is only
        # appropriate for a page visited in a browser, not the API.
        #
        # API authentication will end up checking for an invalid
        # realm, and throw a JSON-format error if appropriate.
        if request.path.startswith(("/static/", "/api/", "/json/")):
            return None

        subdomain = get_subdomain(request)
        if subdomain == settings.SOCIAL_AUTH_SUBDOMAIN:
            # Realms are not supposed to exist on SOCIAL_AUTH_SUBDOMAIN.
            return None

        request_notes = RequestNotes.get_notes(request)
        try:
            request_notes.realm = get_realm(subdomain)
            request_notes.has_fetched_realm = True
        except Realm.DoesNotExist:
            if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
                # The root domain is used for creating new
                # organizations even if it does not host a realm.
                return None

            return render(request, "zerver/invalid_realm.html", status=404)

        return None
|
2017-01-30 23:19:38 +01:00
|
|
|
|
2021-02-12 08:19:30 +01:00
|
|
|
|
# Do not trust X-Forwarded-For; use X-Real-Ip, set from nginx.
#
# The `X-Forwarded-For` header is a list of proxies' IP addresses; each
# proxy appends the remote address of the host it received its request
# from to the list, as it passes the request down. A naïve parsing, as
# SetRemoteAddrFromForwardedFor did, would thus interpret the first
# address in the list as the client's IP.
#
# However, clients can pass in arbitrary `X-Forwarded-For` headers,
# which would allow them to spoof their IP address. nginx's behavior is
# to treat the addresses as untrusted unless they match an allowlist of
# known proxies. By setting `real_ip_recursive on`, it also allows this
# behavior to be applied repeatedly, moving from right to left down the
# `X-Forwarded-For` list, stopping at the right-most address that is
# untrusted.
#
# Rather than re-implement this logic in Django, we pass the first
# untrusted value that nginx computed down into Django via the
# `X-Real-Ip` header. This allows consistent IP addresses in logs
# between nginx and Django.
#
# Proxied calls into Tornado (which don't use uWSGI) already passed
# this header, as Tornado logging respects it.
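#
# Illustrative example (addresses made up): with trusted proxies
# 10.0.0.1 and 10.0.0.2, and a request carrying
#     X-Forwarded-For: 203.0.113.5, 198.51.100.7, 10.0.0.2, 10.0.0.1
# nginx walks the list right to left, skips the trusted 10.0.0.1 and
# 10.0.0.2, and stops at 198.51.100.7, the right-most untrusted
# address, which becomes X-Real-Ip (and hence REMOTE_ADDR below).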
class SetRemoteAddrFromRealIpHeader(MiddlewareMixin):
    """Middleware that sets REMOTE_ADDR based on the X-Real-Ip header.

    This middleware is similar to Django's old
    SetRemoteAddrFromForwardedFor middleware. We use X-Real-Ip, and
    not X-Forwarded-For, because the latter is a list of proxies, some
    number of which are trusted by us, and some of which could be
    arbitrarily set by the user. nginx has already parsed which are
    which, and has set X-Real-Ip to the first one, going right to
    left, which is untrusted.

    Since we are always deployed behind nginx, we can trust the
    X-Real-Ip which is so set. In development, we fall back to the
    REMOTE_ADDR supplied by the server.
    """

    def process_request(self, request: HttpRequest) -> None:
        try:
            real_ip = request.headers["X-Real-IP"]
        except KeyError:
            pass
        else:
            request.META["REMOTE_ADDR"] = real_ip


def alter_content(request: HttpRequest, content: bytes) -> bytes:
    first_paragraph_text = get_content_description(content, request)
    placeholder_open_graph_description = RequestNotes.get_notes(
        request
    ).placeholder_open_graph_description
    assert placeholder_open_graph_description is not None
    return content.replace(
        placeholder_open_graph_description.encode(),
        first_paragraph_text.encode(),
    )
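
# Illustrative sketch (placeholder text assumed, not defined in this
# file): if a page was rendered with a placeholder description such as
#     <meta property="og:description" content="PLACEHOLDER">
# alter_content() replaces the PLACEHOLDER bytes with the page's actual
# first-paragraph text before the response is sent.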


class FinalizeOpenGraphDescription(MiddlewareMixin):
    def process_response(
        self, request: HttpRequest, response: StreamingHttpResponse
    ) -> StreamingHttpResponse:
        if RequestNotes.get_notes(request).placeholder_open_graph_description is not None:
            assert not response.streaming
            response.content = alter_content(request, response.content)
        return response


class ZulipCommonMiddleware(CommonMiddleware):
    """
    Patched version of CommonMiddleware to disable the APPEND_SLASH
    redirect behavior inside Tornado.

    While this has some correctness benefit in encouraging clients
    to implement the API correctly, this also saves about 600us in
    the runtime of every GET /events query, as the APPEND_SLASH
    route resolution logic is surprisingly expensive.

    TODO: We should probably extend this behavior to apply to all of
    our API routes. The APPEND_SLASH behavior is really only useful
    for non-API endpoints, things like /login. But doing that
    transition will require more careful testing.
    """
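
    # Illustrative example (URL patterns assumed, not taken from this
    # file): with APPEND_SLASH enabled, a GET for /login that matches no
    # URL pattern triggers a second resolution of /login/ and a redirect
    # if that matches; inside Tornado we skip that extra resolution
    # entirely.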
    def should_redirect_with_slash(self, request: HttpRequest) -> bool:
        if settings.RUNNING_INSIDE_TORNADO:
            return False
        return super().should_redirect_with_slash(request)


def validate_scim_bearer_token(request: HttpRequest) -> Optional[SCIMClient]:
    """
    This function verifies the request is allowed to make SCIM requests on this
    subdomain, by checking the provided bearer token and ensuring it matches a
    SCIM client configured for this subdomain in settings.SCIM_CONFIG.

    If successful, returns the corresponding SCIMClient object. Returns None otherwise.
    """

    subdomain = get_subdomain(request)
    scim_config_dict = settings.SCIM_CONFIG.get(subdomain)
    if not scim_config_dict:
        return None

    valid_bearer_token = scim_config_dict.get("bearer_token")
    scim_client_name = scim_config_dict.get("scim_client_name")
    # We really don't want a misconfiguration where these are unset,
    # allowing free access to the SCIM API:
    assert valid_bearer_token
    assert scim_client_name

    if request.headers.get("Authorization") != f"Bearer {valid_bearer_token}":
        return None

    request_notes = RequestNotes.get_notes(request)
    assert request_notes.realm

    # While API authentication code paths are sufficiently high
    # traffic that we prefer to use a cache, SCIM is much lower
    # traffic, and doing a database query is plenty fast.
    return SCIMClient.objects.get(realm=request_notes.realm, name=scim_client_name)


class ZulipSCIMAuthCheckMiddleware(SCIMAuthCheckMiddleware):
    """
    Overridden version of the middleware implemented in django-scim2
    (https://github.com/15five/django-scim2/blob/master/src/django_scim/middleware.py)
    to also handle authenticating the client.
    """

    def process_request(self, request: HttpRequest) -> Optional[HttpResponse]:
        # This determines whether this is a SCIM request based on the request's
        # path and, if it is, logs request information, including the body, as
        # well as the response, for debugging purposes to the
        # `django_scim.middleware` logger, at DEBUG level. We keep those logs
        # in /var/log/zulip/scim.log.
        if self.should_log_request(request):
            self.log_request(request)

        # Here we verify the request is indeed to a SCIM endpoint. That's
        # ensured by comparing the path with self.reverse_url, which is the
        # root SCIM path /scim/v2/. Of course we don't want to proceed with
        # authenticating the request for SCIM if a non-SCIM endpoint is being
        # queried.
        if not request.path.startswith(self.reverse_url):
            return None

        scim_client = validate_scim_bearer_token(request)
        if not scim_client:
            response = HttpResponse(status=401)
            response["WWW-Authenticate"] = scim_settings.WWW_AUTHENTICATE_HEADER
            return response

        # The client has been successfully authenticated for SCIM on this
        # subdomain, so we can assign the corresponding SCIMClient object to
        # request.user, which will allow this request to pass
        # request.user.is_authenticated checks from now on, to be served by
        # the relevant views implemented in django-scim2.
        request.user = scim_client
        return None
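

# Illustrative usage (hostname and token made up): a SCIM client
# authenticates each request with the configured bearer token, e.g.:
#     curl -H "Authorization: Bearer secret-token" \
#         https://chat.example.com/scim/v2/Users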