import time
import uuid
from contextlib import contextmanager
from typing import IO, TYPE_CHECKING, Any, Callable, Iterator, Optional, Sequence
from unittest import mock, skipUnless

import DNS
import orjson
from circuitbreaker import CircuitBreakerMonitor
from django.conf import settings
from django.core.exceptions import ValidationError
from django.test import override_settings
from django.utils.timezone import now as timezone_now
from typing_extensions import override

from zerver.forms import email_is_not_mit_mailing_list
from zerver.lib.cache import cache_delete
from zerver.lib.rate_limiter import (
    RateLimitedIPAddr,
    RateLimitedUser,
    RateLimiterLockingError,
    get_tor_ips,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import ratelimit_rule
from zerver.lib.zephyr import compute_mit_user_fullname
from zerver.models import PushDeviceToken, UserProfile

if settings.ZILENCER_ENABLED:
    from zilencer.models import RateLimitedRemoteZulipServer, RemoteZulipServer

if TYPE_CHECKING:
    from django.test.client import _MonkeyPatchedWSGIResponse as TestHttpResponse


class MITNameTest(ZulipTestCase):
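    """Tests of Hesiod full-name lookups and MIT mailing-list detection,
    with all DNS queries mocked out."""
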
    def test_valid_hesiod(self) -> None:
        with mock.patch(
            "DNS.dnslookup",
            return_value=[
                ["starnine:*:84233:101:Athena Consulting Exchange User,,,:/mit/starnine:/bin/bash"]
            ],
        ):
            self.assertEqual(
                compute_mit_user_fullname(self.mit_email("starnine")),
                "Athena Consulting Exchange User",
            )
        with mock.patch(
            "DNS.dnslookup",
            return_value=[["sipbexch:*:87824:101:Exch Sipb,,,:/mit/sipbexch:/bin/athena/bash"]],
        ):
            self.assertEqual(compute_mit_user_fullname("sipbexch@mit.edu"), "Exch Sipb")

    def test_invalid_hesiod(self) -> None:
        with mock.patch(
            "DNS.dnslookup", side_effect=DNS.Base.ServerError("DNS query status: NXDOMAIN", 3)
        ):
            self.assertEqual(compute_mit_user_fullname("1234567890@mit.edu"), "1234567890@mit.edu")
        with mock.patch(
            "DNS.dnslookup", side_effect=DNS.Base.ServerError("DNS query status: NXDOMAIN", 3)
        ):
            self.assertEqual(compute_mit_user_fullname("ec-discuss@mit.edu"), "ec-discuss@mit.edu")

    def test_mailinglist(self) -> None:
        with mock.patch(
            "DNS.dnslookup", side_effect=DNS.Base.ServerError("DNS query status: NXDOMAIN", 3)
        ):
            self.assertRaises(ValidationError, email_is_not_mit_mailing_list, "1234567890@mit.edu")
        with mock.patch(
            "DNS.dnslookup", side_effect=DNS.Base.ServerError("DNS query status: NXDOMAIN", 3)
        ):
            self.assertRaises(ValidationError, email_is_not_mit_mailing_list, "ec-discuss@mit.edu")

    def test_notmailinglist(self) -> None:
        with mock.patch("DNS.dnslookup", return_value=[["POP IMAP.EXCHANGE.MIT.EDU starnine"]]):
            email_is_not_mit_mailing_list("sipbexch@mit.edu")


class RateLimitTests(ZulipTestCase):
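    """Tests of the rate-limiting middleware across its bucketing domains:
    per-user, per-IP, TOR exit nodes lumped together, and remote Zulip
    servers. Time is mocked so each rule's window can be stepped through
    deterministically."""
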
    @override
    def setUp(self) -> None:
        super().setUp()

        # Some tests here can be somewhat timing-sensitive in a way
        # that can't be eliminated, e.g. due to testing things that rely
        # on Redis' internal timing mechanism which we can't mock.
        # The first API request when running a suite of tests is slow
        # and can take multiple seconds. This is not a problem when running
        # multiple tests, but if an individual, time-sensitive test from this class
        # is run, the first API request it makes taking a lot of time can throw things off
        # and cause the test to fail. Thus we do a dummy API request here to warm up
        # the system and allow the tests to assume their requests won't take multiple seconds.
        user = self.example_user("hamlet")
        self.api_get(user, "/api/v1/messages")

        settings.RATE_LIMITING = True

    @override
    def tearDown(self) -> None:
        settings.RATE_LIMITING = False

        super().tearDown()

    def send_api_message(self, user: UserProfile, content: str) -> "TestHttpResponse":
        return self.api_post(
            user,
            "/api/v1/messages",
            {
                "type": "stream",
                "to": orjson.dumps("Verona").decode(),
                "content": content,
                "topic": "whatever",
            },
        )

    def send_unauthed_api_request(self, **kwargs: Any) -> "TestHttpResponse":
        result = self.client_get("/json/messages", **kwargs)
        # We're not making a correct request here, but rate-limiting is supposed
        # to happen before the request fails due to not being correctly made. Thus
        # we expect either a 400 error if the request is allowed by the rate limiter,
        # or a 429 if we're above the limit. We don't expect to see other status
        # codes here, so we assert for safety.
        self.assertIn(result.status_code, [400, 429])
        return result

    def test_headers(self) -> None:
        user = self.example_user("hamlet")
        RateLimitedUser(user).clear_history()

        result = self.send_api_message(user, "some stuff")
        self.assertTrue("X-RateLimit-Remaining" in result.headers)
        self.assertTrue("X-RateLimit-Limit" in result.headers)
        self.assertTrue("X-RateLimit-Reset" in result.headers)

    def test_ratelimit_decrease(self) -> None:
        user = self.example_user("hamlet")
        RateLimitedUser(user).clear_history()
        result = self.send_api_message(user, "some stuff")
        limit = int(result["X-RateLimit-Remaining"])

        result = self.send_api_message(user, "some stuff 2")
        newlimit = int(result["X-RateLimit-Remaining"])
        self.assertEqual(limit, newlimit + 1)

    def do_test_hit_ratelimits(
        self,
        request_func: Callable[[], "TestHttpResponse"],
        is_json: bool = True,
    ) -> None:
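        # Shared helper: issues 6 requests at 0.1s mocked-time intervals
        # against the caller's active rate-limit rule, expects only the 6th
        # to be rejected with a 429, checks the rejection's shape (JSON API
        # error or user-facing HTML page), and then steps mocked time past
        # the window to confirm the limiter forgives us automatically.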
        def api_assert_func(result: "TestHttpResponse") -> None:
            self.assertEqual(result.status_code, 429)
            self.assertEqual(result.headers["Content-Type"], "application/json")
            json = result.json()
            self.assertEqual(json.get("result"), "error")
            self.assertIn("API usage exceeded rate limit", json.get("msg"))
            self.assertEqual(json.get("retry-after"), 0.5)
            self.assertTrue("Retry-After" in result.headers)
            self.assertEqual(result["Retry-After"], "0.5")

        def user_facing_assert_func(result: "TestHttpResponse") -> None:
            self.assertEqual(result.status_code, 429)
            self.assertNotEqual(result.headers["Content-Type"], "application/json")
            self.assert_in_response("Rate limit exceeded.", result)

        if is_json:
            assert_func = api_assert_func
        else:
            assert_func = user_facing_assert_func

        start_time = time.time()
        for i in range(6):
            with mock.patch("time.time", return_value=start_time + i * 0.1):
                result = request_func()
            if i < 5:
                self.assertNotEqual(result.status_code, 429)

        assert_func(result)

        # We simulate waiting a second here, rather than force-clearing our history,
        # to make sure the rate-limiting code automatically forgives a user
        # after some time has passed.
        with mock.patch("time.time", return_value=start_time + 1.01):
            result = request_func()

            self.assertNotEqual(result.status_code, 429)
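
    # Each @ratelimit_rule(N, M, domain=...) below temporarily installs a rule
    # allowing M requests per N-second window for that domain (so 5 requests
    # per second here), per zerver.lib.test_helpers.ratelimit_rule.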
    @ratelimit_rule(1, 5, domain="api_by_user")
    def test_hit_ratelimits_as_user(self) -> None:
        user = self.example_user("cordelia")
        RateLimitedUser(user).clear_history()

        self.do_test_hit_ratelimits(lambda: self.send_api_message(user, "some stuff"))

    @ratelimit_rule(1, 5, domain="email_change_by_user")
    def test_hit_change_email_ratelimit_as_user(self) -> None:
        user = self.example_user("cordelia")
        RateLimitedUser(user).clear_history()

        emails = [f"new-email-{n}@zulip.com" for n in range(1, 8)]
        self.do_test_hit_ratelimits(
            lambda: self.api_patch(user, "/api/v1/settings", {"email": emails.pop()}),
        )

    @ratelimit_rule(1, 5, domain="api_by_ip")
    def test_hit_ratelimits_as_ip(self) -> None:
        self.do_test_hit_ratelimits(self.send_unauthed_api_request)

        # Other IPs should not be rate-limited
        resp = self.send_unauthed_api_request(REMOTE_ADDR="127.0.0.2")
        self.assertNotEqual(resp.status_code, 429)

    @ratelimit_rule(1, 5, domain="sends_email_by_ip")
    def test_create_realm_rate_limiting(self) -> None:
        with self.settings(OPEN_REALM_CREATION=True):
            self.do_test_hit_ratelimits(
                lambda: self.submit_realm_creation_form(
                    email="new@zulip.com", realm_subdomain="custom-test", realm_name="Zulip test"
                ),
                is_json=False,
            )

    @ratelimit_rule(1, 5, domain="sends_email_by_ip")
    def test_find_account_rate_limiting(self) -> None:
        self.do_test_hit_ratelimits(
            lambda: self.client_post("/accounts/find/", {"emails": "new@zulip.com"}),
            is_json=False,
        )

    @ratelimit_rule(1, 5, domain="sends_email_by_ip")
    def test_password_reset_rate_limiting(self) -> None:
        with self.assertLogs(level="INFO") as m:
            self.do_test_hit_ratelimits(
                lambda: self.client_post("/accounts/password/reset/", {"email": "new@zulip.com"}),
                is_json=False,
            )
        self.assertEqual(
            m.output,
            ["INFO:root:Too many password reset attempts for email new@zulip.com from 127.0.0.1"],
        )

    # Test whether submitting multiple emails is handled correctly.
    # The limit is set to 10 per second, so 5 requests with 2 emails
    # submitted in each should be allowed.
    @ratelimit_rule(1, 10, domain="sends_email_by_ip")
    def test_find_account_rate_limiting_multiple(self) -> None:
        self.do_test_hit_ratelimits(
            lambda: self.client_post("/accounts/find/", {"emails": "new@zulip.com,new2@zulip.com"}),
            is_json=False,
        )

    # If we submit 3 emails and the rate limit is 2, we should get
    # a 429 and not send any emails.
    @ratelimit_rule(1, 2, domain="sends_email_by_ip")
    def test_find_account_rate_limiting_multiple_one_request(self) -> None:
        emails = [
            "iago@zulip.com",
            "cordelia@zulip.com",
            "hamlet@zulip.com",
        ]
        resp = self.client_post("/accounts/find/", {"emails": ",".join(emails)})
        self.assertEqual(resp.status_code, 429)

        from django.core.mail import outbox

        self.assert_length(outbox, 0)

    @ratelimit_rule(1, 5, domain="sends_email_by_ip")
    def test_register_account_rate_limiting(self) -> None:
        self.do_test_hit_ratelimits(
            lambda: self.client_post("/register/", {"email": "new@zulip.com"}),
            is_json=False,
        )

    @ratelimit_rule(1, 5, domain="sends_email_by_ip")
    def test_combined_ip_limits(self) -> None:
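        # The realm-creation and find-account endpoints are both limited
        # under the single "sends_email_by_ip" domain above, so mixed
        # traffic must still exhaust the one shared per-IP bucket.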
        # Alternate requests to /new/ and /accounts/find/
        request_count = 0

        def alternate_requests() -> "TestHttpResponse":
            nonlocal request_count
            request_count += 1
            if request_count % 2 == 1:
                return self.submit_realm_creation_form(
                    email="new@zulip.com", realm_subdomain="custom-test", realm_name="Zulip test"
                )
            else:
                return self.client_post("/accounts/find/", {"emails": "new@zulip.com"})

        self.do_test_hit_ratelimits(alternate_requests, is_json=False)

    @contextmanager
    def tor_mock(
        self,
        side_effect: Optional[Exception] = None,
        read_data: Sequence[str] = ["1.2.3.4", "5.6.7.8"],
    ) -> Iterator[mock.Mock]:
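        # Test helper: resets the get_tor_ips circuit breaker, clears the
        # memcached entry for the exit-node list, and patches builtins.open
        # so that only reads of TOR_EXIT_NODE_FILE_PATH hit the mock, which
        # is yielded so callers can assert on how the file was read.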
        # We need to reset the circuitbreaker before starting. We
        # patch the .opened property to be false, then call the
        # function, so it resets to closed.
        with mock.patch("builtins.open", mock.mock_open(read_data=orjson.dumps(["1.2.3.4"]))):
            with mock.patch(
                "circuitbreaker.CircuitBreaker.opened", new_callable=mock.PropertyMock
            ) as mock_opened:
                mock_opened.return_value = False
                get_tor_ips()

        # Having closed it, it's now cached. Clear the cache.
        assert CircuitBreakerMonitor.get("get_tor_ips").closed
        cache_delete("tor_ip_addresses:")

        builtin_open = open
        if side_effect:
            tor_open = mock.MagicMock(side_effect=side_effect)
        else:
            tor_open = mock.mock_open(read_data=orjson.dumps(read_data))

        def selective_mock_open(*args: Any, **kwargs: Any) -> IO[Any]:
            if args[0] == settings.TOR_EXIT_NODE_FILE_PATH:
                return tor_open(*args, **kwargs)
            return builtin_open(*args, **kwargs)

        with mock.patch("builtins.open", selective_mock_open):
            yield tor_open

    @ratelimit_rule(1, 5, domain="api_by_ip")
    @override_settings(RATE_LIMIT_TOR_TOGETHER=True)
    def test_tor_ip_limits(self) -> None:
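        # With RATE_LIMIT_TOR_TOGETHER enabled, both mocked exit-node
        # addresses ("1.2.3.4" and "5.6.7.8") share one rate-limit bucket,
        # so alternating between them must still trip the 5/second rule.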
        request_count = 0
        for ip in ["1.2.3.4", "5.6.7.8", "tor-exit-node"]:
            RateLimitedIPAddr(ip, domain="api_by_ip").clear_history()

        def alternate_requests() -> "TestHttpResponse":
            nonlocal request_count
            request_count += 1
            if request_count % 2 == 1:
                return self.send_unauthed_api_request(REMOTE_ADDR="1.2.3.4")
            else:
                return self.send_unauthed_api_request(REMOTE_ADDR="5.6.7.8")

        with self.tor_mock(read_data=["1.2.3.4", "5.6.7.8"]) as tor_open:
            self.do_test_hit_ratelimits(alternate_requests)

        # This is only read once, despite being used on each request
        tor_open.assert_called_once_with(settings.TOR_EXIT_NODE_FILE_PATH, "rb")
        tor_open().read.assert_called_once()

    @ratelimit_rule(1, 5, domain="api_by_ip")
    @override_settings(RATE_LIMIT_TOR_TOGETHER=True)
    def test_tor_file_empty(self) -> None:
        for ip in ["1.2.3.4", "5.6.7.8", "tor-exit-node"]:
            RateLimitedIPAddr(ip, domain="api_by_ip").clear_history()

        # An empty list of IPs is treated as an error in parsing the
        # input, and as such should not be cached; rate-limiting
        # should work as normal, per-IP.
        with self.tor_mock(read_data=[]) as tor_open:
            with self.assertLogs("zerver.lib.rate_limiter", level="WARNING"):
                self.do_test_hit_ratelimits(
                    lambda: self.send_unauthed_api_request(REMOTE_ADDR="1.2.3.4")
                )
                resp = self.send_unauthed_api_request(REMOTE_ADDR="5.6.7.8")
                self.assertNotEqual(resp.status_code, 429)

        # Was not cached, so we tried to read twice before the
        # circuit-breaker tripped and we stopped trying.
        tor_open().read.assert_has_calls([mock.call(), mock.call()])

    @ratelimit_rule(1, 5, domain="api_by_ip")
    @override_settings(RATE_LIMIT_TOR_TOGETHER=True)
    def test_tor_file_not_found(self) -> None:
        for ip in ["1.2.3.4", "5.6.7.8", "tor-exit-node"]:
            RateLimitedIPAddr(ip, domain="api_by_ip").clear_history()

        with self.tor_mock(side_effect=FileNotFoundError("File not found")) as tor_open:
            # If we cannot get a list of TOR exit nodes, then
            # rate-limiting works as normal, per-IP.
            with self.assertLogs("zerver.lib.rate_limiter", level="WARNING") as log_mock:
                self.do_test_hit_ratelimits(
                    lambda: self.send_unauthed_api_request(REMOTE_ADDR="1.2.3.4")
                )
                resp = self.send_unauthed_api_request(REMOTE_ADDR="5.6.7.8")
                self.assertNotEqual(resp.status_code, 429)

        # Tries twice before the circuit-breaker trips and we stop trying.
        tor_open.assert_has_calls(
            [
                mock.call(settings.TOR_EXIT_NODE_FILE_PATH, "rb"),
                mock.call(settings.TOR_EXIT_NODE_FILE_PATH, "rb"),
            ]
        )

        self.assert_length(log_mock.output, 8)
        self.assertEqual(
            log_mock.output[0:2],
            [
                "WARNING:zerver.lib.rate_limiter:Failed to fetch TOR exit node list: {}".format(
                    "File not found"
                )
            ]
            * 2,
        )
        self.assertIn(
            'Failed to fetch TOR exit node list: Circuit "get_tor_ips" OPEN',
            log_mock.output[3],
        )

    @skipUnless(settings.ZILENCER_ENABLED, "requires zilencer")
    @ratelimit_rule(1, 5, domain="api_by_remote_server")
    def test_hit_ratelimits_as_remote_server(self) -> None:
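        # Remote Zulip servers are bucketed by server UUID under the
        # "api_by_remote_server" domain, not by client IP.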
        server_uuid = str(uuid.uuid4())
        server = RemoteZulipServer(
            uuid=server_uuid,
            api_key="magic_secret_api_key",
            hostname="demo.example.com",
            last_updated=timezone_now(),
        )
        server.save()

        endpoint = "/api/v1/remotes/push/register"
        payload = {"user_id": 10, "token": "111222", "token_kind": PushDeviceToken.GCM}
        try:
            # Remote servers can only make requests to the root subdomain.
            original_default_subdomain = self.DEFAULT_SUBDOMAIN
            self.DEFAULT_SUBDOMAIN = ""

            RateLimitedRemoteZulipServer(server).clear_history()
            with self.assertLogs("zilencer.auth", level="WARNING") as m:
                self.do_test_hit_ratelimits(lambda: self.uuid_post(server_uuid, endpoint, payload))
            self.assertEqual(
                m.output,
                [
                    f"WARNING:zilencer.auth:Remote server demo.example.com {server_uuid[:12]} exceeded rate limits on domain api_by_remote_server"
                ],
            )
        finally:
            self.DEFAULT_SUBDOMAIN = original_default_subdomain

    def test_hit_ratelimiterlockingexception(self) -> None:
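        # If the Redis backend cannot acquire its lock to increment the
        # usage counter, the limiter fails closed: the request gets a 429
        # and a warning is logged, rather than bypassing the limit.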
        user = self.example_user("cordelia")
        RateLimitedUser(user).clear_history()

        with mock.patch(
            "zerver.lib.rate_limiter.RedisRateLimiterBackend.incr_ratelimit",
            side_effect=RateLimiterLockingError,
        ):
            with self.assertLogs("zerver.lib.rate_limiter", level="WARNING") as m:
                result = self.send_api_message(user, "some stuff")
                self.assertEqual(result.status_code, 429)
            self.assertEqual(
                m.output,
                [
                    "WARNING:zerver.lib.rate_limiter:Deadlock trying to incr_ratelimit for {}".format(
                        f"RateLimitedUser:{user.id}:api_by_user"
                    )
                ],
            )