import os
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
from unittest import mock

import ujson
from django.db import connection
from django.test import TestCase, override_settings
from sqlalchemy.sql import and_, column, select, table
from sqlalchemy.sql.elements import ClauseElement

from zerver.lib.actions import do_deactivate_user, do_set_realm_property
from zerver.lib.message import MessageDict
from zerver.lib.narrow import build_narrow_filter, is_web_public_compatible
from zerver.lib.request import JsonableError
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.streams import create_streams_if_needed
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import POSTRequestMock, get_user_messages, queries_captured
from zerver.lib.topic import MATCH_TOPIC, TOPIC_NAME
from zerver.lib.topic_mutes import set_topic_mutes
from zerver.lib.types import DisplayRecipientT
from zerver.models import (
    Message,
    Realm,
    Recipient,
    Stream,
    Subscription,
    UserMessage,
    UserProfile,
    get_display_recipient,
    get_realm,
    get_stream,
)
from zerver.views.message_fetch import (
    LARGER_THAN_MAX_MESSAGE_ID,
    BadNarrowOperator,
    NarrowBuilder,
    Query,
    exclude_muting_conditions,
    find_first_unread_anchor,
    get_messages_backend,
    ok_to_include_history,
    post_process_limited_query,
)

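# Compile a SQLAlchemy clause against the active connection's dialect, so
# tests can assert on the rendered SQL text and on its bound parameters.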
def get_sqlalchemy_sql(query: ClauseElement) -> str:
    dialect = get_sqlalchemy_connection().dialect
    comp = query.compile(dialect=dialect)
    return str(comp)

def get_sqlalchemy_query_params(query: ClauseElement) -> Dict[str, object]:
    dialect = get_sqlalchemy_connection().dialect
    comp = query.compile(dialect=dialect)
    return comp.params

def get_recipient_id_for_stream_name(realm: Realm, stream_name: str) -> int:
    stream = get_stream(stream_name, realm)
    return stream.recipient.id

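# Mark the user's subscription to the given stream as muted; the `in:`
# narrow tests below rely on this to have something to exclude.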
def mute_stream(realm: Realm, user_profile: UserProfile, stream_name: str) -> None:
    stream = get_stream(stream_name, realm)
    recipient = stream.recipient
    subscription = Subscription.objects.get(recipient=recipient, user_profile=user_profile)
    subscription.is_muted = True
    subscription.save()

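# Patch get_first_visible_message_id in the message_fetch view, so a test
# can pretend that message ids below `message_id` are hidden from the user.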
def first_visible_id_as(message_id: int) -> Any:
    return mock.patch(
        'zerver.views.message_fetch.get_first_visible_message_id',
        return_value=message_id,
    )

class NarrowBuilderTest(ZulipTestCase):
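    """Exercises NarrowBuilder.add_term: each test feeds a single narrow
    term to the builder and checks the WHERE clause (and, where given, the
    bound parameters) of the SQL it generates for self.raw_query.
    """
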
    def setUp(self) -> None:
        super().setUp()
        self.realm = get_realm('zulip')
        self.user_profile = self.example_user('hamlet')
        self.builder = NarrowBuilder(self.user_profile, column('id'))
        self.raw_query = select([column("id")], None, table("zerver_message"))
        self.hamlet_email = self.example_user('hamlet').email
        self.othello_email = self.example_user('othello').email

    def test_add_term_using_not_defined_operator(self) -> None:
        term = dict(operator='not-defined', operand='any')
        self.assertRaises(BadNarrowOperator, self._build_query, term)

    def test_add_term_using_stream_operator(self) -> None:
        term = dict(operator='stream', operand='Scotland')
        self._do_add_term_test(term, 'WHERE recipient_id = %(recipient_id_1)s')

    def test_add_term_using_stream_operator_and_negated(self) -> None:  # NEGATED
        term = dict(operator='stream', operand='Scotland', negated=True)
        self._do_add_term_test(term, 'WHERE recipient_id != %(recipient_id_1)s')

    def test_add_term_using_stream_operator_and_non_existing_operand_should_raise_error(
            self) -> None:
        term = dict(operator='stream', operand='NonExistingStream')
        self.assertRaises(BadNarrowOperator, self._build_query, term)

    def test_add_term_using_is_operator_and_private_operand(self) -> None:
        term = dict(operator='is', operand='private')
        self._do_add_term_test(term, 'WHERE (flags & %(flags_1)s) != %(param_1)s')

    def test_add_term_using_streams_operator_and_invalid_operand_should_raise_error(
            self) -> None:
        term = dict(operator='streams', operand='invalid_operands')
        self.assertRaises(BadNarrowOperator, self._build_query, term)

    def test_add_term_using_streams_operator_and_public_stream_operand(self) -> None:
        term = dict(operator='streams', operand='public')
        self._do_add_term_test(term, 'WHERE recipient_id IN (%(recipient_id_1)s, %(recipient_id_2)s, %(recipient_id_3)s, %(recipient_id_4)s, %(recipient_id_5)s)')

        # Add new streams
        stream_dicts: List[Mapping[str, Any]] = [
            {
                "name": "publicstream",
" description " : " Public stream with public history " ,
2019-08-13 20:20:36 +02:00
} ,
{
" name " : " privatestream " ,
" description " : " Private stream with non-public history " ,
" invite_only " : True ,
2019-08-13 20:20:36 +02:00
} ,
{
" name " : " privatewithhistory " ,
" description " : " Private stream with public history " ,
" invite_only " : True ,
" history_public_to_subscribers " : True ,
} ,
        ]
        realm = get_realm('zulip')
        created, existing = create_streams_if_needed(realm, stream_dicts)
        self.assertEqual(len(created), 3)
        self.assertEqual(len(existing), 0)
        # Only one of the three new streams is public, so the number of
        # matched recipient ids grows by 1, not 3.
        self._do_add_term_test(term, 'WHERE recipient_id IN (%(recipient_id_1)s, %(recipient_id_2)s, %(recipient_id_3)s, %(recipient_id_4)s, %(recipient_id_5)s, %(recipient_id_6)s)')

    def test_add_term_using_streams_operator_and_public_stream_operand_negated(self) -> None:
        term = dict(operator='streams', operand='public', negated=True)
        self._do_add_term_test(term, 'WHERE recipient_id NOT IN (%(recipient_id_1)s, %(recipient_id_2)s, %(recipient_id_3)s, %(recipient_id_4)s, %(recipient_id_5)s)')

        # Add new streams
        stream_dicts: List[Mapping[str, Any]] = [
            {
                "name": "publicstream",
" description " : " Public stream with public history " ,
2019-08-13 20:20:36 +02:00
} ,
{
" name " : " privatestream " ,
" description " : " Private stream with non-public history " ,
" invite_only " : True ,
2019-08-13 20:20:36 +02:00
} ,
{
" name " : " privatewithhistory " ,
" description " : " Private stream with public history " ,
" invite_only " : True ,
" history_public_to_subscribers " : True ,
} ,
        ]
        realm = get_realm('zulip')
        created, existing = create_streams_if_needed(realm, stream_dicts)
        self.assertEqual(len(created), 3)
        self.assertEqual(len(existing), 0)
        # Only one of the three new streams is public, so the number of
        # matched recipient ids grows by 1, not 3.
        self._do_add_term_test(term, 'WHERE recipient_id NOT IN (%(recipient_id_1)s, %(recipient_id_2)s, %(recipient_id_3)s, %(recipient_id_4)s, %(recipient_id_5)s, %(recipient_id_6)s)')

    def test_add_term_using_is_operator_private_operand_and_negated(
            self) -> None:  # NEGATED
        term = dict(operator='is', operand='private', negated=True)
        self._do_add_term_test(term, 'WHERE (flags & %(flags_1)s) = %(param_1)s')

    def test_add_term_using_is_operator_and_non_private_operand(self) -> None:
        for operand in ['starred', 'mentioned', 'alerted']:
            term = dict(operator='is', operand=operand)
            self._do_add_term_test(term, 'WHERE (flags & %(flags_1)s) != %(param_1)s')

    def test_add_term_using_is_operator_and_unread_operand(self) -> None:
        term = dict(operator='is', operand='unread')
        self._do_add_term_test(term, 'WHERE (flags & %(flags_1)s) = %(param_1)s')

    def test_add_term_using_is_operator_and_unread_operand_and_negated(
            self) -> None:  # NEGATED
        term = dict(operator='is', operand='unread', negated=True)
        self._do_add_term_test(term, 'WHERE (flags & %(flags_1)s) != %(param_1)s')

    def test_add_term_using_is_operator_non_private_operand_and_negated(
            self) -> None:  # NEGATED
        term = dict(operator='is', operand='starred', negated=True)
        where_clause = 'WHERE (flags & %(flags_1)s) = %(param_1)s'
        params = dict(
            flags_1=UserMessage.flags.starred.mask,
            param_1=0,
        )
        self._do_add_term_test(term, where_clause, params)

        term = dict(operator='is', operand='alerted', negated=True)
        where_clause = 'WHERE (flags & %(flags_1)s) = %(param_1)s'
        params = dict(
            flags_1=UserMessage.flags.has_alert_word.mask,
            param_1=0,
        )
        self._do_add_term_test(term, where_clause, params)

        term = dict(operator='is', operand='mentioned', negated=True)
        where_clause = 'WHERE NOT ((flags & %(flags_1)s) != %(param_1)s OR (flags & %(flags_2)s) != %(param_2)s)'
        params = dict(
            flags_1=UserMessage.flags.mentioned.mask,
            param_1=0,
            flags_2=UserMessage.flags.wildcard_mentioned.mask,
            param_2=0,
        )
        self._do_add_term_test(term, where_clause, params)

    def test_add_term_using_non_supported_operator_should_raise_error(self) -> None:
        term = dict(operator='is', operand='non_supported')
        self.assertRaises(BadNarrowOperator, self._build_query, term)

    def test_add_term_using_topic_operator_and_lunch_operand(self) -> None:
        term = dict(operator='topic', operand='lunch')
        self._do_add_term_test(term, 'WHERE upper(subject) = upper(%(param_1)s)')

    def test_add_term_using_topic_operator_lunch_operand_and_negated(
            self) -> None:  # NEGATED
        term = dict(operator='topic', operand='lunch', negated=True)
        self._do_add_term_test(term, 'WHERE upper(subject) != upper(%(param_1)s)')

    def test_add_term_using_topic_operator_and_personal_operand(self) -> None:
        term = dict(operator='topic', operand='personal')
        self._do_add_term_test(term, 'WHERE upper(subject) = upper(%(param_1)s)')

    def test_add_term_using_topic_operator_personal_operand_and_negated(
            self) -> None:  # NEGATED
        term = dict(operator='topic', operand='personal', negated=True)
        self._do_add_term_test(term, 'WHERE upper(subject) != upper(%(param_1)s)')

    def test_add_term_using_sender_operator(self) -> None:
        term = dict(operator='sender', operand=self.othello_email)
        self._do_add_term_test(term, 'WHERE sender_id = %(param_1)s')

    def test_add_term_using_sender_operator_and_negated(self) -> None:  # NEGATED
        term = dict(operator='sender', operand=self.othello_email, negated=True)
        self._do_add_term_test(term, 'WHERE sender_id != %(param_1)s')

    def test_add_term_using_sender_operator_with_non_existing_user_as_operand(
            self) -> None:
        term = dict(operator='sender', operand='non-existing@zulip.com')
        self.assertRaises(BadNarrowOperator, self._build_query, term)

    def test_add_term_using_pm_with_operator_and_not_the_same_user_as_operand(self) -> None:
        term = dict(operator='pm-with', operand=self.othello_email)
        self._do_add_term_test(term, 'WHERE sender_id = %(sender_id_1)s AND recipient_id = %(recipient_id_1)s OR sender_id = %(sender_id_2)s AND recipient_id = %(recipient_id_2)s')

    def test_add_term_using_pm_with_operator_not_the_same_user_as_operand_and_negated(
            self) -> None:  # NEGATED
        term = dict(operator='pm-with', operand=self.othello_email, negated=True)
        self._do_add_term_test(term, 'WHERE NOT (sender_id = %(sender_id_1)s AND recipient_id = %(recipient_id_1)s OR sender_id = %(sender_id_2)s AND recipient_id = %(recipient_id_2)s)')

    def test_add_term_using_pm_with_operator_the_same_user_as_operand(self) -> None:
        term = dict(operator='pm-with', operand=self.hamlet_email)
        self._do_add_term_test(term, 'WHERE sender_id = %(sender_id_1)s AND recipient_id = %(recipient_id_1)s')

    def test_add_term_using_pm_with_operator_the_same_user_as_operand_and_negated(
            self) -> None:  # NEGATED
        term = dict(operator='pm-with', operand=self.hamlet_email, negated=True)
        self._do_add_term_test(term, 'WHERE NOT (sender_id = %(sender_id_1)s AND recipient_id = %(recipient_id_1)s)')

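    # A pm-with operand that also includes the user's own email should be
    # treated the same as a narrow on just the other party, not as a huddle.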
    def test_add_term_using_pm_with_operator_and_self_and_user_as_operand(self) -> None:
        myself_and_other = ','.join([
            self.example_user('hamlet').email,
            self.example_user('othello').email,
        ])
        term = dict(operator='pm-with', operand=myself_and_other)
        self._do_add_term_test(term, 'WHERE sender_id = %(sender_id_1)s AND recipient_id = %(recipient_id_1)s OR sender_id = %(sender_id_2)s AND recipient_id = %(recipient_id_2)s')

    def test_add_term_using_pm_with_operator_more_than_one_user_as_operand(self) -> None:
        two_others = ','.join([
            self.example_user('cordelia').email,
            self.example_user('othello').email,
        ])
        term = dict(operator='pm-with', operand=two_others)
        self._do_add_term_test(term, 'WHERE recipient_id = %(recipient_id_1)s')

    def test_add_term_using_pm_with_operator_self_and_user_as_operand_and_negated(
            self) -> None:  # NEGATED
        myself_and_other = ','.join([
            self.example_user('hamlet').email,
            self.example_user('othello').email,
        ])
        term = dict(operator='pm-with', operand=myself_and_other, negated=True)
        self._do_add_term_test(term, 'WHERE NOT (sender_id = %(sender_id_1)s AND recipient_id = %(recipient_id_1)s OR sender_id = %(sender_id_2)s AND recipient_id = %(recipient_id_2)s)')

    def test_add_term_using_pm_with_operator_more_than_one_user_as_operand_and_negated(self) -> None:
        two_others = ','.join([
            self.example_user('cordelia').email,
            self.example_user('othello').email,
        ])
        term = dict(operator='pm-with', operand=two_others, negated=True)
        self._do_add_term_test(term, 'WHERE recipient_id != %(recipient_id_1)s')

    def test_add_term_using_pm_with_operator_with_comma_noise(self) -> None:
        term = dict(operator='pm-with', operand=' ,,, ,,, , ')
        self.assertRaises(BadNarrowOperator, self._build_query, term)

    def test_add_term_using_pm_with_operator_with_existing_and_non_existing_user_as_operand(self) -> None:
        term = dict(operator='pm-with', operand=self.othello_email + ',non-existing@zulip.com')
        self.assertRaises(BadNarrowOperator, self._build_query, term)

    def test_add_term_using_id_operator(self) -> None:
        term = dict(operator='id', operand=555)
        self._do_add_term_test(term, 'WHERE id = %(param_1)s')

    def test_add_term_using_id_operator_invalid(self) -> None:
        term = dict(operator='id', operand='')
        self.assertRaises(BadNarrowOperator, self._build_query, term)

        term = dict(operator='id', operand='notanint')
        self.assertRaises(BadNarrowOperator, self._build_query, term)

    def test_add_term_using_id_operator_and_negated(self) -> None:  # NEGATED
        term = dict(operator='id', operand=555, negated=True)
        self._do_add_term_test(term, 'WHERE id != %(param_1)s')

    def test_add_term_using_group_pm_operator_and_not_the_same_user_as_operand(self) -> None:
        # Test without any such group PM threads existing
        term = dict(operator='group-pm-with', operand=self.othello_email)
        self._do_add_term_test(term, 'WHERE 1 != 1')

        # Test with at least one such group PM thread existing
        self.send_huddle_message(self.user_profile, [self.example_user("othello"),
                                                     self.example_user("cordelia")])

        term = dict(operator='group-pm-with', operand=self.othello_email)
        self._do_add_term_test(term, 'WHERE recipient_id IN (%(recipient_id_1)s)')

    def test_add_term_using_group_pm_operator_not_the_same_user_as_operand_and_negated(
            self) -> None:  # NEGATED
        term = dict(operator='group-pm-with', operand=self.othello_email, negated=True)
        self._do_add_term_test(term, 'WHERE 1 = 1')

    def test_add_term_using_group_pm_operator_with_non_existing_user_as_operand(self) -> None:
        term = dict(operator='group-pm-with', operand='non-existing@zulip.com')
        self.assertRaises(BadNarrowOperator, self._build_query, term)

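    # Full-text search compiles to different SQL depending on the backend:
    # the default Postgres tsvector path, or PGroonga when USING_PGROONGA
    # is enabled.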
    @override_settings(USING_PGROONGA=False)
    def test_add_term_using_search_operator(self) -> None:
        term = dict(operator='search', operand='"french fries"')
        self._do_add_term_test(term, 'WHERE (content ILIKE %(content_1)s OR subject ILIKE %(subject_1)s) AND (search_tsvector @@ plainto_tsquery(%(param_4)s, %(param_5)s))')

    @override_settings(USING_PGROONGA=False)
    def test_add_term_using_search_operator_and_negated(
            self) -> None:  # NEGATED
        term = dict(operator='search', operand='"french fries"', negated=True)
        self._do_add_term_test(term, 'WHERE NOT (content ILIKE %(content_1)s OR subject ILIKE %(subject_1)s) AND NOT (search_tsvector @@ plainto_tsquery(%(param_4)s, %(param_5)s))')

    @override_settings(USING_PGROONGA=True)
    def test_add_term_using_search_operator_pgroonga(self) -> None:
        term = dict(operator='search', operand='"french fries"')
        self._do_add_term_test(term, 'WHERE search_pgroonga &@~ escape_html(%(escape_html_1)s)')

    @override_settings(USING_PGROONGA=True)
    def test_add_term_using_search_operator_and_negated_pgroonga(
            self) -> None:  # NEGATED
        term = dict(operator='search', operand='"french fries"', negated=True)
        self._do_add_term_test(term, 'WHERE NOT (search_pgroonga &@~ escape_html(%(escape_html_1)s))')

    def test_add_term_using_has_operator_and_attachment_operand(self) -> None:
        term = dict(operator='has', operand='attachment')
        self._do_add_term_test(term, 'WHERE has_attachment')

    def test_add_term_using_has_operator_attachment_operand_and_negated(
            self) -> None:  # NEGATED
        term = dict(operator='has', operand='attachment', negated=True)
        self._do_add_term_test(term, 'WHERE NOT has_attachment')

    def test_add_term_using_has_operator_and_image_operand(self) -> None:
        term = dict(operator='has', operand='image')
        self._do_add_term_test(term, 'WHERE has_image')

    def test_add_term_using_has_operator_image_operand_and_negated(
            self) -> None:  # NEGATED
        term = dict(operator='has', operand='image', negated=True)
        self._do_add_term_test(term, 'WHERE NOT has_image')

    def test_add_term_using_has_operator_and_link_operand(self) -> None:
        term = dict(operator='has', operand='link')
        self._do_add_term_test(term, 'WHERE has_link')

    def test_add_term_using_has_operator_link_operand_and_negated(
            self) -> None:  # NEGATED
        term = dict(operator='has', operand='link', negated=True)
        self._do_add_term_test(term, 'WHERE NOT has_link')

    def test_add_term_using_has_operator_non_supported_operand_should_raise_error(self) -> None:
        term = dict(operator='has', operand='non_supported')
        self.assertRaises(BadNarrowOperator, self._build_query, term)

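    # in:home should exclude streams the user has muted, which shows up in
    # the SQL as a NOT IN over the muted streams' recipient ids.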
    def test_add_term_using_in_operator(self) -> None:
        mute_stream(self.realm, self.user_profile, 'Verona')
        term = dict(operator='in', operand='home')
        self._do_add_term_test(term, 'WHERE recipient_id NOT IN (%(recipient_id_1)s)')

    def test_add_term_using_in_operator_and_negated(self) -> None:
        # negated = True should not change anything
        mute_stream(self.realm, self.user_profile, 'Verona')
        term = dict(operator='in', operand='home', negated=True)
        self._do_add_term_test(term, 'WHERE recipient_id NOT IN (%(recipient_id_1)s)')

    def test_add_term_using_in_operator_and_all_operand(self) -> None:
        mute_stream(self.realm, self.user_profile, 'Verona')
        term = dict(operator='in', operand='all')
        query = self._build_query(term)
        self.assertEqual(get_sqlalchemy_sql(query), 'SELECT id \nFROM zerver_message')

    def test_add_term_using_in_operator_all_operand_and_negated(self) -> None:
        # negated = True should not change anything
        mute_stream(self.realm, self.user_profile, 'Verona')
        term = dict(operator='in', operand='all', negated=True)
        query = self._build_query(term)
        self.assertEqual(get_sqlalchemy_sql(query), 'SELECT id \nFROM zerver_message')

    def test_add_term_using_in_operator_and_not_defined_operand(self) -> None:
        term = dict(operator='in', operand='not_defined')
        self.assertRaises(BadNarrowOperator, self._build_query, term)

    def test_add_term_using_near_operator(self) -> None:
        term = dict(operator='near', operand='operand')
        query = self._build_query(term)
        self.assertEqual(get_sqlalchemy_sql(query), 'SELECT id \nFROM zerver_message')

    def _do_add_term_test(self, term: Dict[str, Any], where_clause: str,
                          params: Optional[Dict[str, Any]] = None) -> None:
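        """Build a query for the narrow term and assert that the expected
        WHERE clause appears in the compiled SQL; when `params` is given,
        also require the query's bound parameters to match it exactly.
        """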
        query = self._build_query(term)
        if params is not None:
            actual_params = get_sqlalchemy_query_params(query)
            self.assertEqual(actual_params, params)
        self.assertIn(where_clause, get_sqlalchemy_sql(query))

    def _build_query(self, term: Dict[str, Any]) -> Query:
        return self.builder.add_term(self.raw_query, term)

class NarrowLibraryTest(TestCase):
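    """Covers the pure-library narrow helpers (build_narrow_filter,
    is_web_public_compatible), which don't need the full ZulipTestCase
    machinery.
    """
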
    def test_build_narrow_filter(self) -> None:
        fixtures_path = os.path.join(os.path.dirname(__file__),
                                     'fixtures/narrow.json')
        with open(fixtures_path) as f:
            scenarios = ujson.load(f)
        self.assertTrue(len(scenarios) == 9)
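        # Each fixture scenario pairs a narrow with events the resulting
        # filter must accept and events it must reject.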
        for scenario in scenarios:
            narrow = scenario['narrow']
            accept_events = scenario['accept_events']
            reject_events = scenario['reject_events']
            narrow_filter = build_narrow_filter(narrow)
            for e in accept_events:
                self.assertTrue(narrow_filter(e))
            for e in reject_events:
                self.assertFalse(narrow_filter(e))

    def test_build_narrow_filter_invalid(self) -> None:
        with self.assertRaises(JsonableError):
            build_narrow_filter(["invalid_operator", "operand"])

    def test_is_web_public_compatible(self) -> None:
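        """A narrow is web-public compatible only if every term can be
        evaluated without a logged-in user: stream/topic/sender terms
        qualify, while pm-with, group-pm-with, and is:starred/private
        do not.
        """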
self . assertTrue ( is_web_public_compatible ( [ ] ) )
self . assertTrue ( is_web_public_compatible ( [ { " operator " : " has " ,
" operand " : " attachment " } ] ) )
self . assertTrue ( is_web_public_compatible ( [ { " operator " : " has " ,
" operand " : " image " } ] ) )
self . assertTrue ( is_web_public_compatible ( [ { " operator " : " search " ,
" operand " : " magic " } ] ) )
self . assertTrue ( is_web_public_compatible ( [ { " operator " : " near " ,
" operand " : " 15 " } ] ) )
self . assertTrue ( is_web_public_compatible ( [ { " operator " : " id " ,
" operand " : " 15 " } ,
{ " operator " : " has " ,
" operand " : " attachment " } ] ) )
self . assertTrue ( is_web_public_compatible ( [ { " operator " : " sender " ,
" operand " : " hamlet@zulip.com " } ] ) )
self . assertFalse ( is_web_public_compatible ( [ { " operator " : " pm-with " ,
" operand " : " hamlet@zulip.com " } ] ) )
self . assertFalse ( is_web_public_compatible ( [ { " operator " : " group-pm-with " ,
" operand " : " hamlet@zulip.com " } ] ) )
self . assertTrue ( is_web_public_compatible ( [ { " operator " : " stream " ,
" operand " : " Denmark " } ] ) )
self . assertTrue ( is_web_public_compatible ( [ { " operator " : " stream " ,
" operand " : " Denmark " } ,
{ " operator " : " topic " ,
" operand " : " logic " } ] ) )
self . assertFalse ( is_web_public_compatible ( [ { " operator " : " is " ,
" operand " : " starred " } ] ) )
self . assertFalse ( is_web_public_compatible ( [ { " operator " : " is " ,
" operand " : " private " } ] ) )
2019-08-13 20:20:36 +02:00
self . assertTrue ( is_web_public_compatible ( [ { " operator " : " streams " ,
" operand " : " public " } ] ) )
2018-05-21 17:44:00 +02:00
# Malformed input not allowed
self . assertFalse ( is_web_public_compatible ( [ { " operator " : " has " } ] ) )
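
        # Summarizing the assertions above: operators that only select
        # public-stream content (stream, streams:public, topic, sender, has,
        # search, near, id) remain web-public compatible, while anything tied
        # to private conversations or per-user state (pm-with, group-pm-with,
        # is:starred, is:private), as well as malformed terms, does not.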

class IncludeHistoryTest(ZulipTestCase):
    def test_ok_to_include_history(self) -> None:
        user_profile = self.example_user("hamlet")
        self.make_stream('public_stream', realm=user_profile.realm)

        # Negated stream searches should not include history.
        narrow = [
            dict(operator='stream', operand='public_stream', negated=True),
        ]
        self.assertFalse(ok_to_include_history(narrow, user_profile))

        # streams:public searches should include history for non-guest members.
        narrow = [
            dict(operator='streams', operand='public'),
        ]
        self.assertTrue(ok_to_include_history(narrow, user_profile))

        # Negated -streams:public searches should not include history.
        narrow = [
            dict(operator='streams', operand='public', negated=True),
        ]
        self.assertFalse(ok_to_include_history(narrow, user_profile))

        # Definitely forbid seeing history on private streams.
        self.make_stream('private_stream', realm=user_profile.realm, invite_only=True)
        subscribed_user_profile = self.example_user("cordelia")
        self.subscribe(subscribed_user_profile, 'private_stream')
        narrow = [
            dict(operator='stream', operand='private_stream'),
        ]
        self.assertFalse(ok_to_include_history(narrow, user_profile))

        # Verify that with stream.history_public_to_subscribers, subscribed
        # users can access history.
        self.make_stream('private_stream_2', realm=user_profile.realm,
                         invite_only=True, history_public_to_subscribers=True)
        subscribed_user_profile = self.example_user("cordelia")
        self.subscribe(subscribed_user_profile, 'private_stream_2')
        narrow = [
            dict(operator='stream', operand='private_stream_2'),
        ]
        self.assertFalse(ok_to_include_history(narrow, user_profile))
        self.assertTrue(ok_to_include_history(narrow, subscribed_user_profile))

        # History doesn't apply to PMs.
        narrow = [
            dict(operator='is', operand='private'),
        ]
        self.assertFalse(ok_to_include_history(narrow, user_profile))

        # History doesn't apply to unread messages.
        narrow = [
            dict(operator='is', operand='unread'),
        ]
        self.assertFalse(ok_to_include_history(narrow, user_profile))

        # If we are looking for something like starred messages, there is
        # no point in searching historical messages.
        narrow = [
            dict(operator='stream', operand='public_stream'),
            dict(operator='is', operand='starred'),
        ]
        self.assertFalse(ok_to_include_history(narrow, user_profile))

        # There is no point in searching history for an is operator, even
        # when it is combined with streams:public.
        narrow = [
            dict(operator='streams', operand='public'),
            dict(operator='is', operand='mentioned'),
        ]
        self.assertFalse(ok_to_include_history(narrow, user_profile))
        narrow = [
            dict(operator='streams', operand='public'),
            dict(operator='is', operand='unread'),
        ]
        self.assertFalse(ok_to_include_history(narrow, user_profile))
        narrow = [
            dict(operator='streams', operand='public'),
            dict(operator='is', operand='alerted'),
        ]
        self.assertFalse(ok_to_include_history(narrow, user_profile))

        # Simple True case.
        narrow = [
            dict(operator='stream', operand='public_stream'),
        ]
        self.assertTrue(ok_to_include_history(narrow, user_profile))

        narrow = [
            dict(operator='stream', operand='public_stream'),
            dict(operator='topic', operand='whatever'),
            dict(operator='search', operand='needle in haystack'),
        ]
        self.assertTrue(ok_to_include_history(narrow, user_profile))

        # Tests for a guest user.
        guest_user_profile = self.example_user("polonius")
        # Using Cordelia to compare between a guest and a normal user.
        subscribed_user_profile = self.example_user("cordelia")

        # streams:public searches should not include history for guest members.
        narrow = [
            dict(operator='streams', operand='public'),
        ]
        self.assertFalse(ok_to_include_history(narrow, guest_user_profile))

        # A guest user can't access an unsubscribed public stream.
        self.subscribe(subscribed_user_profile, 'public_stream_2')
        narrow = [
            dict(operator='stream', operand='public_stream_2'),
        ]
        self.assertFalse(ok_to_include_history(narrow, guest_user_profile))
        self.assertTrue(ok_to_include_history(narrow, subscribed_user_profile))

        # Similarly, a guest user can't access an unsubscribed private stream.
        self.subscribe(subscribed_user_profile, 'private_stream_3')
        narrow = [
            dict(operator='stream', operand='private_stream_3'),
        ]
        self.assertFalse(ok_to_include_history(narrow, guest_user_profile))
        self.assertTrue(ok_to_include_history(narrow, subscribed_user_profile))

        # A guest user can access the history of private streams they are
        # subscribed to.
        self.subscribe(guest_user_profile, 'private_stream_4')
        self.subscribe(subscribed_user_profile, 'private_stream_4')
        narrow = [
            dict(operator='stream', operand='private_stream_4'),
        ]
        self.assertTrue(ok_to_include_history(narrow, guest_user_profile))
        self.assertTrue(ok_to_include_history(narrow, subscribed_user_profile))
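
        # A rough summary of the policy pinned down above: history is only
        # included when every term of the narrow targets content the user can
        # fully read (a readable stream, or streams:public for non-guests),
        # and never when the narrow involves per-user message state
        # (is:starred/unread/mentioned/alerted) or private conversations.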

class PostProcessTest(ZulipTestCase):
    def test_basics(self) -> None:
        def verify(in_ids: List[int],
                   num_before: int,
                   num_after: int,
                   first_visible_message_id: int,
                   anchor: int,
                   anchored_to_left: bool,
                   anchored_to_right: bool,
                   out_ids: List[int],
                   found_anchor: bool,
                   found_oldest: bool,
                   found_newest: bool,
                   history_limited: bool) -> None:
            in_rows = [[row_id] for row_id in in_ids]
            out_rows = [[row_id] for row_id in out_ids]
            info = post_process_limited_query(
                rows=in_rows,
                num_before=num_before,
                num_after=num_after,
                anchor=anchor,
                anchored_to_left=anchored_to_left,
                anchored_to_right=anchored_to_right,
                first_visible_message_id=first_visible_message_id,
            )
            self.assertEqual(info['rows'], out_rows)
            self.assertEqual(info['found_anchor'], found_anchor)
            self.assertEqual(info['found_newest'], found_newest)
            self.assertEqual(info['found_oldest'], found_oldest)
            self.assertEqual(info['history_limited'], history_limited)

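        # A minimal sketch of the behavior verify() checks, mirroring the
        # third call below (inputs per verify()'s parameters; the summary is
        # inferred from these expectations, not a separate specification):
        #
        #     info = post_process_limited_query(
        #         rows=[[8], [9], [10], [11], [12]], num_before=2, num_after=2,
        #         anchor=10, anchored_to_left=False, anchored_to_right=False,
        #         first_visible_message_id=9,
        #     )
        #     # Rows below first_visible_message_id get trimmed, so
        #     # info['rows'] == [[9], [10], [11], [12]], and the trimming is
        #     # reported as found_oldest=True plus history_limited=True.
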
        # typical 2-sided query, with a bunch of tests for different
        # values of first_visible_message_id.
        anchor = 10
        verify(
            in_ids=[8, 9, anchor, 11, 12],
            num_before=2, num_after=2,
            first_visible_message_id=0,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[8, 9, 10, 11, 12],
            found_anchor=True, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[8, 9, anchor, 11, 12],
            num_before=2, num_after=2,
            first_visible_message_id=8,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[8, 9, 10, 11, 12],
            found_anchor=True, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[8, 9, anchor, 11, 12],
            num_before=2, num_after=2,
            first_visible_message_id=9,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[9, 10, 11, 12],
            found_anchor=True, found_oldest=True,
            found_newest=False, history_limited=True,
        )
        verify(
            in_ids=[8, 9, anchor, 11, 12],
            num_before=2, num_after=2,
            first_visible_message_id=10,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[10, 11, 12],
            found_anchor=True, found_oldest=True,
            found_newest=False, history_limited=True,
        )
        verify(
            in_ids=[8, 9, anchor, 11, 12],
            num_before=2, num_after=2,
            first_visible_message_id=11,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[11, 12],
            found_anchor=False, found_oldest=True,
            found_newest=False, history_limited=True,
        )
        verify(
            in_ids=[8, 9, anchor, 11, 12],
            num_before=2, num_after=2,
            first_visible_message_id=12,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[12],
            found_anchor=False, found_oldest=True,
            found_newest=True, history_limited=True,
        )
        verify(
            in_ids=[8, 9, anchor, 11, 12],
            num_before=2, num_after=2,
            first_visible_message_id=13,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[],
            found_anchor=False, found_oldest=True,
            found_newest=True, history_limited=True,
        )
        # typical 2-sided query missing anchor and grabbing an extra row
        anchor = 10
        verify(
            in_ids=[7, 9, 11, 13, 15],
            num_before=2, num_after=2,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            first_visible_message_id=0,
            out_ids=[7, 9, 11, 13],
            found_anchor=False, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[7, 9, 11, 13, 15],
            num_before=2, num_after=2,
            first_visible_message_id=10,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[11, 13],
            found_anchor=False, found_oldest=True,
            found_newest=False, history_limited=True,
        )
        verify(
            in_ids=[7, 9, 11, 13, 15],
            num_before=2, num_after=2,
            first_visible_message_id=9,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[9, 11, 13],
            found_anchor=False, found_oldest=True,
            found_newest=False, history_limited=True,
        )
        # 2-sided query with old anchor
        anchor = 100
        verify(
            in_ids=[50, anchor, 150, 200],
            num_before=2, num_after=2,
            first_visible_message_id=0,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[50, 100, 150, 200],
            found_anchor=True, found_oldest=True,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[50, anchor, 150, 200],
            num_before=2, num_after=2,
            first_visible_message_id=anchor,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[100, 150, 200],
            found_anchor=True, found_oldest=True,
            found_newest=False, history_limited=True,
        )
        # 2-sided query with new anchor
        anchor = 900
        verify(
            in_ids=[700, 800, anchor, 1000],
            num_before=2, num_after=2,
            first_visible_message_id=0,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[700, 800, 900, 1000],
            found_anchor=True, found_oldest=False,
            found_newest=True, history_limited=False,
        )
        verify(
            in_ids=[700, 800, anchor, 1000],
            num_before=2, num_after=2,
            first_visible_message_id=anchor,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[900, 1000],
            found_anchor=True, found_oldest=True,
            found_newest=True, history_limited=True,
        )
        # left-sided query with old anchor
        anchor = 100
        verify(
            in_ids=[50, anchor],
            num_before=2, num_after=0,
            first_visible_message_id=0,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[50, 100],
            found_anchor=True, found_oldest=True,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[50, anchor],
            num_before=2, num_after=0,
            first_visible_message_id=anchor,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[100],
            found_anchor=True, found_oldest=True,
            found_newest=False, history_limited=True,
        )
        # left-sided query with new anchor
        anchor = 900
        verify(
            in_ids=[700, 800, anchor],
            num_before=2, num_after=0,
            first_visible_message_id=0,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[700, 800, 900],
            found_anchor=True, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[700, 800, anchor],
            num_before=2, num_after=0,
            first_visible_message_id=anchor,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[900],
            found_anchor=True, found_oldest=True,
            found_newest=False, history_limited=True,
        )
        # left-sided query with new anchor and extra row
        anchor = 900
        verify(
            in_ids=[600, 700, 800, anchor],
            num_before=2, num_after=0,
            first_visible_message_id=0,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[700, 800, 900],
            found_anchor=True, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[600, 700, 800, anchor],
            num_before=2, num_after=0,
            first_visible_message_id=anchor,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[900],
            found_anchor=True, found_oldest=True,
            found_newest=False, history_limited=True,
        )
        # left-sided query anchored to the right
        anchor = None
        verify(
            in_ids=[900, 1000],
            num_before=2, num_after=0,
            first_visible_message_id=0,
            anchor=anchor, anchored_to_left=False, anchored_to_right=True,
            out_ids=[900, 1000],
            found_anchor=False, found_oldest=False,
            found_newest=True, history_limited=False,
        )
        verify(
            in_ids=[900, 1000],
            num_before=2, num_after=0,
            first_visible_message_id=1000,
            anchor=anchor, anchored_to_left=False, anchored_to_right=True,
            out_ids=[1000],
            found_anchor=False, found_oldest=True,
            found_newest=True, history_limited=True,
        )
        verify(
            in_ids=[900, 1000],
            num_before=2, num_after=0,
            first_visible_message_id=1100,
            anchor=anchor, anchored_to_left=False, anchored_to_right=True,
            out_ids=[],
            found_anchor=False, found_oldest=True,
            found_newest=True, history_limited=True,
        )
        # right-sided query with old anchor
        anchor = 100
        verify(
            in_ids=[anchor, 200, 300, 400],
            num_before=0, num_after=2,
            first_visible_message_id=0,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[100, 200, 300],
            found_anchor=True, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[anchor, 200, 300, 400],
            num_before=0, num_after=2,
            first_visible_message_id=anchor,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[100, 200, 300],
            found_anchor=True, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[anchor, 200, 300, 400],
            num_before=0, num_after=2,
            first_visible_message_id=300,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[300, 400],
            found_anchor=False, found_oldest=False,
            # BUG: history_limited should be True here, since rows were
            # hidden by first_visible_message_id; we assert the current
            # (incorrect) behavior.
            found_newest=False, history_limited=False,
        )
        # right-sided query with new anchor
        anchor = 900
        verify(
            in_ids=[anchor, 1000],
            num_before=0, num_after=2,
            first_visible_message_id=0,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[900, 1000],
            found_anchor=True, found_oldest=False,
            found_newest=True, history_limited=False,
        )
        verify(
            in_ids=[anchor, 1000],
            num_before=0, num_after=2,
            first_visible_message_id=anchor,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[900, 1000],
            found_anchor=True, found_oldest=False,
            found_newest=True, history_limited=False,
        )
        # right-sided query with non-matching anchor
        anchor = 903
        verify(
            in_ids=[1000, 1100, 1200],
            num_before=0, num_after=2,
            first_visible_message_id=0,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[1000, 1100],
            found_anchor=False, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[1000, 1100, 1200],
            num_before=0, num_after=2,
            first_visible_message_id=anchor,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[1000, 1100],
            found_anchor=False, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[1000, 1100, 1200],
            num_before=0, num_after=2,
            first_visible_message_id=1000,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[1000, 1100],
            found_anchor=False, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[1000, 1100, 1200],
            num_before=0, num_after=2,
            first_visible_message_id=1100,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[1100, 1200],
            found_anchor=False, found_oldest=False,
            # BUG: history_limited should be True here, since rows were
            # hidden by first_visible_message_id; we assert the current
            # (incorrect) behavior.
            found_newest=False, history_limited=False,
        )
        # targeted query that finds row
        anchor = 1000
        verify(
            in_ids=[1000],
            num_before=0, num_after=0,
            first_visible_message_id=0,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[1000],
            found_anchor=True, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[1000],
            num_before=0, num_after=0,
            first_visible_message_id=anchor,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[1000],
            found_anchor=True, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        verify(
            in_ids=[1000],
            num_before=0, num_after=0,
            first_visible_message_id=1100,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[],
            found_anchor=False, found_oldest=False,
            found_newest=False, history_limited=False,
        )
        # targeted query that finds nothing
        anchor = 903
        verify(
            in_ids=[],
            num_before=0, num_after=0,
            first_visible_message_id=0,
            anchor=anchor, anchored_to_left=False, anchored_to_right=False,
            out_ids=[],
            found_anchor=False, found_oldest=False,
            found_newest=False, history_limited=False,
        )

class GetOldMessagesTest(ZulipTestCase):
    def get_and_check_messages(self,
                               modified_params: Dict[str, Union[str, int]],
                               **kwargs: Any) -> Dict[str, Any]:
        post_params: Dict[str, Union[str, int]] = {"anchor": 1, "num_before": 1, "num_after": 1}
        post_params.update(modified_params)
        payload = self.client_get("/json/messages", dict(post_params),
                                  **kwargs)
        self.assert_json_success(payload)
        self.assertEqual(set(payload["Cache-Control"].split(", ")),
                         {"must-revalidate", "no-store", "no-cache", "max-age=0"})

        result = ujson.loads(payload.content)
        self.assertIn("messages", result)
        self.assertIsInstance(result["messages"], list)
        for message in result["messages"]:
            for field in ("content", "content_type", "display_recipient",
                          "avatar_url", "recipient_id", "sender_full_name",
                          "sender_short_name", "timestamp", "reactions"):
                self.assertIn(field, message)
        return result
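
    # Typical usage of the helper above (narrow value illustrative):
    #
    #     result = self.get_and_check_messages(
    #         dict(num_before=0, num_after=10,
    #              narrow=ujson.dumps([dict(operator='stream', operand='Verona')])),
    #     )
    #     message_ids = [m['id'] for m in result['messages']]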

    def message_visibility_test(self, narrow: List[Dict[str, str]],
                                message_ids: List[int], pivot_index: int) -> None:
        num_before = len(message_ids)
        post_params = dict(narrow=ujson.dumps(narrow), num_before=num_before,
                           num_after=0, anchor=LARGER_THAN_MAX_MESSAGE_ID)
        payload = self.client_get("/json/messages", dict(post_params))
        self.assert_json_success(payload)
        result = ujson.loads(payload.content)
        self.assertEqual(len(result["messages"]), len(message_ids))
        for message in result["messages"]:
            assert message["id"] in message_ids
        post_params.update({"num_before": len(message_ids[pivot_index:])})

        with first_visible_id_as(message_ids[pivot_index]):
            payload = self.client_get("/json/messages", dict(post_params))

        self.assert_json_success(payload)
        result = ujson.loads(payload.content)
        self.assertEqual(len(result["messages"]), len(message_ids[pivot_index:]))
        for message in result["messages"]:
            assert message["id"] in message_ids
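
    # The helper above first fetches with no visibility limit and expects all
    # of message_ids back; it then mocks get_first_visible_message_id to
    # message_ids[pivot_index] and expects only the ids from the pivot onward.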

    def get_query_ids(self) -> Dict[str, Union[int, str]]:
        hamlet_user = self.example_user('hamlet')
        othello_user = self.example_user('othello')

        query_ids: Dict[str, Union[int, str]] = {}

        scotland_stream = get_stream('Scotland', hamlet_user.realm)
        query_ids['scotland_recipient'] = scotland_stream.recipient_id
        query_ids['hamlet_id'] = hamlet_user.id
        query_ids['othello_id'] = othello_user.id
        query_ids['hamlet_recipient'] = hamlet_user.recipient_id
        query_ids['othello_recipient'] = othello_user.recipient_id
        recipients = Recipient.objects.filter(
            type=Recipient.STREAM,
            type_id__in=Stream.objects.filter(realm=hamlet_user.realm, invite_only=False),
        ).values('id').order_by('id')
        query_ids['public_streams_recipents'] = ", ".join(str(r['id']) for r in recipients)
        return query_ids
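
    # These ids are presumably interpolated into expected SQL snippets by the
    # query-comparing tests elsewhere in this file.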

    def test_content_types(self) -> None:
        """
        Test that the `content_type` of fetched messages reflects the
        `apply_markdown` flag.
        """
        self.login('hamlet')

        def get_content_type(apply_markdown: bool) -> str:
            req: Dict[str, Any] = dict(
                apply_markdown=ujson.dumps(apply_markdown),
python: Convert assignment type annotations to Python 3.6 style.
This commit was split by tabbott; this piece covers the vast majority
of files in Zulip, but excludes scripts/, tools/, and puppet/ to help
ensure we at least show the right error messages for Xenial systems.
We can likely further refine the remaining pieces with some testing.
Generated by com2ann, with whitespace fixes and various manual fixes
for runtime issues:
- invoiced_through: Optional[LicenseLedger] = models.ForeignKey(
+ invoiced_through: Optional["LicenseLedger"] = models.ForeignKey(
-_apns_client: Optional[APNsClient] = None
+_apns_client: Optional["APNsClient"] = None
- notifications_stream: Optional[Stream] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
- signup_notifications_stream: Optional[Stream] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
+ notifications_stream: Optional["Stream"] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
+ signup_notifications_stream: Optional["Stream"] = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE)
- author: Optional[UserProfile] = models.ForeignKey('UserProfile', blank=True, null=True, on_delete=CASCADE)
+ author: Optional["UserProfile"] = models.ForeignKey('UserProfile', blank=True, null=True, on_delete=CASCADE)
- bot_owner: Optional[UserProfile] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
+ bot_owner: Optional["UserProfile"] = models.ForeignKey('self', null=True, on_delete=models.SET_NULL)
- default_sending_stream: Optional[Stream] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
- default_events_register_stream: Optional[Stream] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
+ default_sending_stream: Optional["Stream"] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
+ default_events_register_stream: Optional["Stream"] = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE)
-descriptors_by_handler_id: Dict[int, ClientDescriptor] = {}
+descriptors_by_handler_id: Dict[int, "ClientDescriptor"] = {}
-worker_classes: Dict[str, Type[QueueProcessingWorker]] = {}
-queues: Dict[str, Dict[str, Type[QueueProcessingWorker]]] = {}
+worker_classes: Dict[str, Type["QueueProcessingWorker"]] = {}
+queues: Dict[str, Dict[str, Type["QueueProcessingWorker"]]] = {}
-AUTH_LDAP_REVERSE_EMAIL_SEARCH: Optional[LDAPSearch] = None
+AUTH_LDAP_REVERSE_EMAIL_SEARCH: Optional["LDAPSearch"] = None
Signed-off-by: Anders Kaseorg <anders@zulipchat.com>
2020-04-22 01:09:50 +02:00
            )
            result = self.get_and_check_messages(req)
            message = result['messages'][0]
            return message['content_type']

        self.assertEqual(
            get_content_type(apply_markdown=False),
            'text/x-markdown',
        )
        self.assertEqual(
            get_content_type(apply_markdown=True),
            'text/html',
        )

    def test_successful_get_messages_reaction(self) -> None:
        """
        Test that the old `/json/messages` endpoint returns reactions.
        """
        self.login('hamlet')
        messages = self.get_and_check_messages(dict())
        message_id = messages['messages'][0]['id']

        self.login('othello')
        reaction_name = 'thumbs_up'
        reaction_info = {
            'emoji_name': reaction_name,
        }

        url = f'/json/messages/{message_id}/reactions'
        payload = self.client_post(url, reaction_info)
        self.assert_json_success(payload)

        self.login('hamlet')
        messages = self.get_and_check_messages({})
        message_to_assert = None
        for message in messages['messages']:
            if message['id'] == message_id:
                message_to_assert = message
                break
        assert message_to_assert is not None
        self.assertEqual(len(message_to_assert['reactions']), 1)
        self.assertEqual(message_to_assert['reactions'][0]['emoji_name'],
                         reaction_name)

    def test_successful_get_messages(self) -> None:
        """
        A call to GET /json/messages with valid parameters returns a list of
        messages.
        """
        self.login('hamlet')
        self.get_and_check_messages(dict())

        othello_email = self.example_user('othello').email

        # We have to support the legacy tuple style while there are old
        # clients around, which might include third party home-grown bots.
        self.get_and_check_messages(
            dict(
                narrow=ujson.dumps(
                    [['pm-with', othello_email]],
                ),
            ),
        )

        self.get_and_check_messages(
            dict(
                narrow=ujson.dumps(
                    [dict(operator='pm-with', operand=othello_email)],
                ),
            ),
        )

    def test_client_avatar(self) -> None:
        """
        The client_gravatar flag determines whether we send avatar_url.
        """
        hamlet = self.example_user('hamlet')
        self.login_user(hamlet)
        do_set_realm_property(hamlet.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)

        self.send_personal_message(hamlet, self.example_user("iago"))

        result = self.get_and_check_messages({})
        message = result['messages'][0]
        self.assertIn('gravatar.com', message['avatar_url'])

        result = self.get_and_check_messages(dict(client_gravatar=ujson.dumps(True)))
        message = result['messages'][0]
        self.assertEqual(message['avatar_url'], None)

        # Now verify client_gravatar doesn't run with EMAIL_ADDRESS_VISIBILITY_ADMINS
        do_set_realm_property(hamlet.realm, "email_address_visibility",
                              Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS)
        result = self.get_and_check_messages(dict(client_gravatar=ujson.dumps(True)))
        message = result['messages'][0]
        self.assertIn('gravatar.com', message['avatar_url'])

    def test_get_messages_with_narrow_pm_with(self) -> None:
        """
        A request for old messages with a narrow by pm-with only returns
        conversations with that user.
        """
        me = self.example_user('hamlet')

        def dr_emails(dr: DisplayRecipientT) -> str:
            assert isinstance(dr, list)
            return ','.join(sorted(set([r['email'] for r in dr] + [me.email])))

        def dr_ids(dr: DisplayRecipientT) -> List[int]:
            assert isinstance(dr, list)
            return list(sorted(set([r['id'] for r in dr] + [self.example_user('hamlet').id])))

        self.send_personal_message(me, self.example_user("iago"))

        self.send_huddle_message(
            me,
            [self.example_user("iago"), self.example_user("cordelia")],
        )

        # Send a 1:1 and a group PM containing Aaron, then deactivate him to
        # verify that the pm-with narrow includes messages from deactivated
        # users as well.
        self.send_personal_message(me, self.example_user("aaron"))
        self.send_huddle_message(
            me,
            [self.example_user("iago"), self.example_user("aaron")],
        )
        aaron = self.example_user("aaron")
        do_deactivate_user(aaron)
        self.assertFalse(aaron.is_active)

        personals = [m for m in get_user_messages(self.example_user('hamlet'))
                     if not m.is_stream_message()]
        for personal in personals:
            emails = dr_emails(get_display_recipient(personal.recipient))
            self.login_user(me)
            narrow: List[Dict[str, Any]] = [dict(operator='pm-with', operand=emails)]
            result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))
            for message in result["messages"]:
                self.assertEqual(dr_emails(message['display_recipient']), emails)

            # Check that passing ids as the operand is consistent with
            # passing emails.
            ids = dr_ids(get_display_recipient(personal.recipient))
            narrow = [dict(operator='pm-with', operand=ids)]
            result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))
            for message in result["messages"]:
                self.assertEqual(dr_emails(message['display_recipient']), emails)

    def test_get_visible_messages_with_narrow_pm_with(self) -> None:
        me = self.example_user('hamlet')
        self.login_user(me)
        self.subscribe(self.example_user("hamlet"), 'Scotland')

        message_ids = []
        for i in range(5):
            message_ids.append(self.send_personal_message(me, self.example_user("iago")))

        narrow = [dict(operator='pm-with', operand=self.example_user("iago").email)]
        self.message_visibility_test(narrow, message_ids, 2)

    def test_get_messages_with_narrow_group_pm_with(self) -> None:
        """
        A request for old messages with a narrow by group-pm-with only returns
        group-private conversations with that user.
        """
        me = self.example_user("hamlet")

        iago = self.example_user("iago")
        cordelia = self.example_user("cordelia")
        othello = self.example_user("othello")

        matching_message_ids = []
        matching_message_ids.append(
            self.send_huddle_message(
                me,
                [iago, cordelia, othello],
            ),
        )
        matching_message_ids.append(
            self.send_huddle_message(
                me,
                [cordelia, othello],
            ),
        )

        non_matching_message_ids = []
        non_matching_message_ids.append(
            self.send_personal_message(me, cordelia),
        )
        non_matching_message_ids.append(
            self.send_huddle_message(
                me,
                [iago, othello],
            ),
        )
        non_matching_message_ids.append(
            self.send_huddle_message(
                self.example_user("cordelia"),
                [iago, othello],
            ),
        )

        self.login_user(me)
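        # The narrow operand may be either an email address or a user ID;
        # both spellings should yield the same matches.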
        test_operands = [cordelia.email, cordelia.id]
        for operand in test_operands:
            narrow = [dict(operator='group-pm-with', operand=operand)]
            result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))
            for message in result["messages"]:
                self.assertIn(message["id"], matching_message_ids)
                self.assertNotIn(message["id"], non_matching_message_ids)

    def test_get_visible_messages_with_narrow_group_pm_with(self) -> None:
        me = self.example_user('hamlet')
        self.login_user(me)

        iago = self.example_user("iago")
        cordelia = self.example_user("cordelia")
        othello = self.example_user("othello")

        message_ids = []
        message_ids.append(
            self.send_huddle_message(
                me,
                [iago, cordelia, othello],
            ),
        )
        message_ids.append(
            self.send_huddle_message(
                me,
                [cordelia, othello],
            ),
        )
        message_ids.append(
            self.send_huddle_message(
                me,
                [cordelia, iago],
            ),
        )

        narrow = [dict(operator='group-pm-with', operand=cordelia.email)]
        self.message_visibility_test(narrow, message_ids, 1)

    def test_include_history(self) -> None:
        hamlet = self.example_user('hamlet')
        cordelia = self.example_user('cordelia')
        stream_name = 'test stream'
        self.subscribe(cordelia, stream_name)

        old_message_id = self.send_stream_message(cordelia, stream_name, content='foo')

        self.subscribe(hamlet, stream_name)
        content = 'hello @**King Hamlet**'
        new_message_id = self.send_stream_message(cordelia, stream_name, content=content)

        self.login_user(hamlet)
        narrow = [
            dict(operator='stream', operand=stream_name),
        ]
        req = dict(
            narrow=ujson.dumps(narrow),
            anchor=LARGER_THAN_MAX_MESSAGE_ID,
            num_before=100,
            num_after=100,
        )
        payload = self.client_get('/json/messages', req)
        self.assert_json_success(payload)
        result = ujson.loads(payload.content)
        messages = result['messages']
        self.assertEqual(len(messages), 2)
        for message in messages:
            if message['id'] == old_message_id:
                old_message = message
            elif message['id'] == new_message_id:
                new_message = message
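        # Hamlet subscribed only after the first message was sent, so he has
        # no UserMessage row for it; it is served from stream history with
        # the 'read' and 'historical' flags.  The second message mentioned
        # him, so it was delivered to him directly with the 'mentioned' flag.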
        self.assertEqual(old_message['flags'], ['read', 'historical'])
        self.assertEqual(new_message['flags'], ['mentioned'])

    def test_get_messages_with_narrow_stream(self) -> None:
        """
        A request for old messages with a narrow by stream only returns
        messages for that stream.
        """
        self.login('hamlet')
        # We need to subscribe to a stream and then send a message to
        # it to ensure that we actually have a stream message in this
        # narrow view.
        self.subscribe(self.example_user("hamlet"), 'Scotland')
        self.send_stream_message(self.example_user("hamlet"), "Scotland")
        messages = get_user_messages(self.example_user('hamlet'))
        stream_messages = [msg for msg in messages if msg.is_stream_message()]
        stream_name = get_display_recipient(stream_messages[0].recipient)
        assert isinstance(stream_name, str)
        stream_id = get_stream(stream_name, stream_messages[0].get_realm()).id
        stream_recipient_id = stream_messages[0].recipient.id

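        # A stream narrow may be specified by name or by stream ID; both
        # should return only messages on that stream's recipient.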
        for operand in [stream_name, stream_id]:
            narrow = [dict(operator='stream', operand=operand)]
            result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))

            for message in result["messages"]:
                self.assertEqual(message["type"], "stream")
                self.assertEqual(message["recipient_id"], stream_recipient_id)

    def test_get_visible_messages_with_narrow_stream(self) -> None:
        self.login('hamlet')
        self.subscribe(self.example_user("hamlet"), 'Scotland')
        message_ids = []
        for i in range(5):
            message_ids.append(self.send_stream_message(self.example_user("iago"), "Scotland"))
        narrow = [dict(operator='stream', operand="Scotland")]
        self.message_visibility_test(narrow, message_ids, 2)

    def test_get_messages_with_narrow_stream_mit_unicode_regex(self) -> None:
        """
        A request for old messages for a user in the mit.edu realm with a unicode
        stream name should be correctly escaped in the database query.
        """
        user = self.mit_user('starnine')
        self.login_user(user)
        # We need to subscribe to a stream and then send a message to
        # it to ensure that we actually have a stream message in this
        # narrow view.
        lambda_stream_name = "\u03bb-stream"
        stream = self.subscribe(user, lambda_stream_name)
        self.assertTrue(stream.is_in_zephyr_realm)

        lambda_stream_d_name = "\u03bb-stream.d"
        self.subscribe(user, lambda_stream_d_name)

        self.send_stream_message(user, "\u03bb-stream")
        self.send_stream_message(user, "\u03bb-stream.d")

        narrow = [dict(operator='stream', operand='\u03bb-stream')]
        result = self.get_and_check_messages(dict(num_after=2,
                                                  narrow=ujson.dumps(narrow)),
                                             subdomain="zephyr")

        messages = get_user_messages(self.mit_user("starnine"))
        stream_messages = [msg for msg in messages if msg.is_stream_message()]
        self.assertEqual(len(result["messages"]), 2)
        for i, message in enumerate(result["messages"]):
            self.assertEqual(message["type"], "stream")
            stream_id = stream_messages[i].recipient.id
            self.assertEqual(message["recipient_id"], stream_id)

    def test_get_messages_with_narrow_topic_mit_unicode_regex(self) -> None:
        """
        A request for old messages for a user in the mit.edu realm with a unicode
        topic name should be correctly escaped in the database query.
        """
        mit_user_profile = self.mit_user("starnine")
        self.login_user(mit_user_profile)
        # We need to subscribe to a stream and then send a message to
        # it to ensure that we actually have a stream message in this
        # narrow view.
        self.subscribe(mit_user_profile, "Scotland")
        self.send_stream_message(mit_user_profile, "Scotland", topic_name="\u03bb-topic")
        self.send_stream_message(mit_user_profile, "Scotland", topic_name="\u03bb-topic.d")
        self.send_stream_message(mit_user_profile, "Scotland", topic_name="\u03bb-topic.d.d")
        self.send_stream_message(mit_user_profile, "Scotland", topic_name="\u03bb-topic.d.d.d")
        self.send_stream_message(mit_user_profile, "Scotland", topic_name="\u03bb-topic.d.d.d.d")

        narrow = [dict(operator='topic', operand='\u03bb-topic')]
        result = self.get_and_check_messages(
            dict(num_after=100, narrow=ujson.dumps(narrow)),
            subdomain="zephyr")

        messages = get_user_messages(mit_user_profile)
        stream_messages = [msg for msg in messages if msg.is_stream_message()]
        self.assertEqual(len(result["messages"]), 5)
        for i, message in enumerate(result["messages"]):
            self.assertEqual(message["type"], "stream")
            stream_id = stream_messages[i].recipient.id
            self.assertEqual(message["recipient_id"], stream_id)

    def test_get_messages_with_narrow_topic_mit_personal(self) -> None:
        """
        We handle .d grouping for MIT realm personal messages correctly.
        """
        mit_user_profile = self.mit_user("starnine")

        # We need to subscribe to a stream and then send a message to
        # it to ensure that we actually have a stream message in this
        # narrow view.
        self.login_user(mit_user_profile)
        self.subscribe(mit_user_profile, "Scotland")

        self.send_stream_message(mit_user_profile, "Scotland", topic_name=".d.d")
        self.send_stream_message(mit_user_profile, "Scotland", topic_name="PERSONAL")
        self.send_stream_message(mit_user_profile, "Scotland", topic_name='(instance "").d')
        self.send_stream_message(mit_user_profile, "Scotland", topic_name=".d.d.d")
        self.send_stream_message(mit_user_profile, "Scotland", topic_name="personal.d")
        self.send_stream_message(mit_user_profile, "Scotland", topic_name='(instance "")')
        self.send_stream_message(mit_user_profile, "Scotland", topic_name=".d.d.d.d")

        narrow = [dict(operator='topic', operand='personal.d.d')]
        result = self.get_and_check_messages(
            dict(num_before=50,
                 num_after=50,
                 narrow=ujson.dumps(narrow)),
            subdomain="zephyr")

        messages = get_user_messages(mit_user_profile)
        stream_messages = [msg for msg in messages if msg.is_stream_message()]
        self.assertEqual(len(result["messages"]), 7)
        for i, message in enumerate(result["messages"]):
            self.assertEqual(message["type"], "stream")
            stream_id = stream_messages[i].recipient.id
            self.assertEqual(message["recipient_id"], stream_id)

    def test_get_messages_with_narrow_sender(self) -> None:
        """
        A request for old messages with a narrow by sender only returns
        messages sent by that person.
        """
        self.login('hamlet')
        hamlet = self.example_user('hamlet')
        othello = self.example_user('othello')
        iago = self.example_user('iago')
        # We need to send a message here to ensure that we actually
        # have a stream message in this narrow view.
        self.send_stream_message(hamlet, "Scotland")
        self.send_stream_message(othello, "Scotland")
        self.send_personal_message(othello, hamlet)
        self.send_stream_message(iago, "Scotland")

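        # As with other user-based narrows, the sender operand may be an
        # email address or a user ID.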
        test_operands = [othello.email, othello.id]
        for operand in test_operands:
            narrow = [dict(operator='sender', operand=operand)]
            result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow)))

            for message in result["messages"]:
                self.assertEqual(message["sender_id"], othello.id)

    def _update_tsvector_index(self) -> None:
        # We use brute force here and update our text search index
        # for the entire zerver_message table (which is small in test
        # mode).  In production there is an async process which keeps
        # the search index up to date.
        with connection.cursor() as cursor:
            cursor.execute("""
            UPDATE zerver_message SET
            search_tsvector = to_tsvector('zulip.english_us_search',
            subject || rendered_content)
            """)

    @override_settings(USING_PGROONGA=False)
    def test_messages_in_narrow(self) -> None:
        user = self.example_user("cordelia")
        self.login_user(user)

        def send(content: str) -> int:
            msg_id = self.send_stream_message(
                sender=user,
                stream_name="Verona",
                content=content,
            )
            return msg_id

        good_id = send('KEYWORDMATCH and should work')
        bad_id = send('no match')
        msg_ids = [good_id, bad_id]
        send('KEYWORDMATCH but not in msg_ids')
        self._update_tsvector_index()
        narrow = [
            dict(operator='search', operand='KEYWORDMATCH'),
        ]
        raw_params = dict(msg_ids=msg_ids, narrow=narrow)
        params = {k: ujson.dumps(v) for k, v in raw_params.items()}
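        # /json/messages/matches_narrow filters the given msg_ids down to
        # those matching the narrow; the third KEYWORDMATCH message is not
        # in msg_ids and so must not appear in the result.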
        result = self.client_get('/json/messages/matches_narrow', params)
        self.assert_json_success(result)
        messages = result.json()['messages']
        self.assertEqual(len(list(messages.keys())), 1)
        message = messages[str(good_id)]
        self.assertEqual(message['match_content'],
                         '<p><span class="highlight">KEYWORDMATCH</span> and should work</p>')

    @override_settings(USING_PGROONGA=False)
    def test_get_messages_with_search(self) -> None:
        self.login('cordelia')
        messages_to_search = [
            ('breakfast', 'there are muffins in the conference room'),
            ('lunch plans', 'I am hungry!'),
            ('meetings', 'discuss lunch after lunch'),
            ('meetings', 'please bring your laptops to take notes'),
            ('dinner', 'Anybody staying late tonight?'),
            ('urltest', 'https://google.com'),
            ('日本', 'こんに ちは 。 今日は いい 天気ですね。'),
            ('日本', '今朝はごはんを食べました。'),
            ('日本', '昨日、日本 のお菓子を送りました。'),
            ('english', 'I want to go to 日本!'),
        ]
        next_message_id = self.get_last_message().id + 1
        cordelia = self.example_user('cordelia')
        for topic, content in messages_to_search:
            self.send_stream_message(
                sender=cordelia,
                stream_name="Verona",
                content=content,
                topic_name=topic,
            )
        self._update_tsvector_index()
        narrow = [
            dict(operator='sender', operand=cordelia.email),
            dict(operator='search', operand='lunch'),
        ]
        result: Dict[str, Any] = self.get_and_check_messages(dict(
            narrow=ujson.dumps(narrow),
            anchor=next_message_id,
            num_before=0,
            num_after=10,
        ))
        self.assertEqual(len(result['messages']), 2)
        messages = result['messages']

        narrow = [dict(operator='search', operand='https://google.com')]
        link_search_result: Dict[str, Any] = self.get_and_check_messages(dict(
            narrow=ujson.dumps(narrow),
            anchor=next_message_id,
            num_before=0,
            num_after=10,
        ))
        self.assertEqual(len(link_search_result['messages']), 1)
        self.assertEqual(link_search_result['messages'][0]['match_content'],
                         '<p><a href="https://google.com">https://<span class="highlight">google.com</span></a></p>')

        (meeting_message,) = [
            m for m in messages
            if m[TOPIC_NAME] == 'meetings'
        ]
        self.assertEqual(
            meeting_message[MATCH_TOPIC],
            'meetings')
        self.assertEqual(
            meeting_message['match_content'],
            '<p>discuss <span class="highlight">lunch</span> after ' +
            '<span class="highlight">lunch</span></p>')

        (lunch_message,) = [
            m for m in messages
            if m[TOPIC_NAME] == 'lunch plans'
        ]
        self.assertEqual(
            lunch_message[MATCH_TOPIC],
            '<span class="highlight">lunch</span> plans')
        self.assertEqual(
            lunch_message['match_content'],
            '<p>I am hungry!</p>')

        # Should not crash when multiple search operands are present
        multi_search_narrow = [
            dict(operator='search', operand='discuss'),
            dict(operator='search', operand='after'),
        ]
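        # Multiple search operands are ANDed together, so only the one
        # message containing both terms should match.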
        multi_search_result: Dict[str, Any] = self.get_and_check_messages(dict(
            narrow=ujson.dumps(multi_search_narrow),
            anchor=next_message_id,
            num_after=10,
            num_before=0,
        ))
        self.assertEqual(len(multi_search_result['messages']), 1)
        self.assertEqual(multi_search_result['messages'][0]['match_content'], '<p><span class="highlight">discuss</span> lunch <span class="highlight">after</span> lunch</p>')

        # Test searching in messages with unicode characters
        narrow = [
            dict(operator='search', operand='日本'),
        ]
        result = self.get_and_check_messages(dict(
            narrow=ujson.dumps(narrow),
            anchor=next_message_id,
            num_after=10,
            num_before=0,
        ))
        self.assertEqual(len(result['messages']), 4)
        messages = result['messages']

        japanese_message = [
            m for m in messages
            if m[TOPIC_NAME] == '日本'][-1]
        self.assertEqual(
            japanese_message[MATCH_TOPIC],
            '<span class="highlight">日本</span>')
        self.assertEqual(
            japanese_message['match_content'],
            '<p>昨日、<span class="highlight">日本</span>' +
            ' のお菓子を送りました。</p>')

        (english_message,) = [
            m for m in messages
            if m[TOPIC_NAME] == 'english'
        ]
        self.assertEqual(
            english_message[MATCH_TOPIC],
            'english')
        self.assertIn(
            english_message['match_content'],
            '<p>I want to go to <span class="highlight">日本</span>!</p>')

        # Multiple search operands with unicode
        multi_search_narrow = [
            dict(operator='search', operand='ちは'),
            dict(operator='search', operand='今日は'),
        ]
        multi_search_result = self.get_and_check_messages(dict(
            narrow=ujson.dumps(multi_search_narrow),
            anchor=next_message_id,
            num_after=10,
            num_before=0,
        ))
        self.assertEqual(len(multi_search_result['messages']), 1)
        self.assertEqual(multi_search_result['messages'][0]['match_content'],
                         '<p>こんに <span class="highlight">ちは</span> 。 <span class="highlight">今日は</span> いい 天気ですね。</p>')

    @override_settings(USING_PGROONGA=False)
    def test_get_visible_messages_with_search(self) -> None:
        self.login('hamlet')
        self.subscribe(self.example_user("hamlet"), 'Scotland')
        messages_to_search = [
            ("Gryffindor", "Hogwart's house which values courage, bravery, nerve, and chivalry"),
            ("Hufflepuff", "Hogwart's house which values hard work, patience, justice, and loyalty."),
            ("Ravenclaw", "Hogwart's house which values intelligence, creativity, learning, and wit"),
            ("Slytherin", "Hogwart's house which values ambition, cunning, leadership, and resourcefulness"),
        ]
        message_ids = []
        for topic, content in messages_to_search:
            message_ids.append(self.send_stream_message(self.example_user("iago"), "Scotland",
                                                        topic_name=topic, content=content))
        self._update_tsvector_index()
        narrow = [dict(operator='search', operand="Hogwart's")]
        self.message_visibility_test(narrow, message_ids, 2)

    @override_settings(USING_PGROONGA=False)
    def test_get_messages_with_search_not_subscribed(self) -> None:
        """Verify support for searching a stream you're not subscribed to"""
        self.subscribe(self.example_user("hamlet"), "newstream")
        self.send_stream_message(
            sender=self.example_user("hamlet"),
            stream_name="newstream",
            content="Public special content!",
            topic_name="new",
        )
        self._update_tsvector_index()

        self.login('cordelia')
        stream_search_narrow = [
            dict(operator='search', operand='special'),
            dict(operator='stream', operand='newstream'),
        ]
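        # Cordelia is not subscribed to 'newstream', but the message is on a
        # public stream, so searching its history should still find it.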
        stream_search_result: Dict[str, Any] = self.get_and_check_messages(dict(
            narrow=ujson.dumps(stream_search_narrow),
            anchor=0,
            num_after=10,
            num_before=10,
        ))
        self.assertEqual(len(stream_search_result['messages']), 1)
        self.assertEqual(stream_search_result['messages'][0]['match_content'],
                         '<p>Public <span class="highlight">special</span> content!</p>')

    @override_settings(USING_PGROONGA=True)
    def test_get_messages_with_search_pgroonga(self) -> None:
        self.login('cordelia')

        next_message_id = self.get_last_message().id + 1
        messages_to_search = [
            ('日本語', 'こんにちは。今日はいい天気ですね。'),
            ('日本語', '今朝はごはんを食べました。'),
            ('日本語', '昨日、日本のお菓子を送りました。'),
            ('english', 'I want to go to 日本!'),
            ('english', 'Can you speak https://en.wikipedia.org/wiki/Japanese?'),
            ('english', 'https://google.com'),
            ('bread & butter', 'chalk & cheese'),
        ]
        for topic, content in messages_to_search:
            self.send_stream_message(
                sender=self.example_user("cordelia"),
                stream_name="Verona",
                content=content,
                topic_name=topic,
            )
        # We use brute force here and update our text search index
        # for the entire zerver_message table (which is small in test
        # mode).  In production there is an async process which keeps
        # the search index up to date.
        with connection.cursor() as cursor:
            cursor.execute("""
            UPDATE zerver_message SET
            search_pgroonga = escape_html(subject) || ' ' || rendered_content
            """)
        narrow = [
            dict(operator='search', operand='日本'),
        ]
        result: Dict[str, Any] = self.get_and_check_messages(dict(
            narrow=ujson.dumps(narrow),
            anchor=next_message_id,
            num_after=10,
            num_before=0,
        ))
        self.assertEqual(len(result['messages']), 4)
        messages = result['messages']

        japanese_message = [m for m in messages if m[TOPIC_NAME] == '日本語'][-1]
        self.assertEqual(
            japanese_message[MATCH_TOPIC],
            '<span class="highlight">日本</span>語')
        self.assertEqual(
            japanese_message['match_content'],
            '<p>昨日、<span class="highlight">日本</span>の' +
            'お菓子を送りました。</p>')

        english_message = [m for m in messages if m[TOPIC_NAME] == 'english'][0]
        self.assertEqual(
            english_message[MATCH_TOPIC],
            'english')
        self.assertIn(
            english_message['match_content'],
            # NOTE: The whitespace here is off due to a pgroonga bug.
            # This bug is a pgroonga regression and according to one of
            # the authors, it should be fixed in the next release.
            ['<p>I want to go to <span class="highlight">日本</span>!</p>',  # This is correct.
python: Use trailing commas consistently.
Automatically generated by the following script, based on the output
of lint with flake8-comma:
import re
import sys
last_filename = None
last_row = None
lines = []
for msg in sys.stdin:
m = re.match(
r"\x1b\[35mflake8 \|\x1b\[0m \x1b\[1;31m(.+):(\d+):(\d+): (\w+)", msg
)
if m:
filename, row_str, col_str, err = m.groups()
row, col = int(row_str), int(col_str)
if filename == last_filename:
assert last_row != row
else:
if last_filename is not None:
with open(last_filename, "w") as f:
f.writelines(lines)
with open(filename) as f:
lines = f.readlines()
last_filename = filename
last_row = row
line = lines[row - 1]
if err in ["C812", "C815"]:
lines[row - 1] = line[: col - 1] + "," + line[col - 1 :]
elif err in ["C819"]:
assert line[col - 2] == ","
lines[row - 1] = line[: col - 2] + line[col - 1 :].lstrip(" ")
if last_filename is not None:
with open(last_filename, "w") as f:
f.writelines(lines)
Signed-off-by: Anders Kaseorg <anders@zulipchat.com>
2020-04-10 05:23:40 +02:00
' <p>I want to go to<span class= " highlight " > 日本</span>!</p> ' ] )

        # Should not crash when multiple search operands are present
        multi_search_narrow = [
            dict(operator='search', operand='can'),
            dict(operator='search', operand='speak'),
            dict(operator='search', operand='wiki'),
        ]
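        # Only messages containing every one of these operands should come
        # back; the assertion below expects exactly one such message.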
        multi_search_result: Dict[str, Any] = self.get_and_check_messages(dict(
            narrow=ujson.dumps(multi_search_narrow),
            anchor=next_message_id,
            num_after=10,
            num_before=0,
        ))
        self.assertEqual(len(multi_search_result['messages']), 1)
        self.assertEqual(multi_search_result['messages'][0]['match_content'],
                         '<p><span class="highlight">Can</span> you <span class="highlight">speak</span> <a href="https://en.wikipedia.org/wiki/Japanese">https://en.<span class="highlight">wiki</span>pedia.org/<span class="highlight">wiki</span>/Japanese</a>?</p>')
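        # Note from the expected output above: highlighting is
        # case-insensitive ('can' matches 'Can'), and terms are highlighted
        # even inside the URL of a link ('wiki' within the Wikipedia URL).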

        # Multiple search operands with unicode
        multi_search_narrow = [
            dict(operator='search', operand='朝は'),
            dict(operator='search', operand='べました'),
        ]
        multi_search_result = self.get_and_check_messages(dict(
            narrow=ujson.dumps(multi_search_narrow),
            anchor=next_message_id,
            num_after=10,
            num_before=0,
        ))
        self.assertEqual(len(multi_search_result['messages']), 1)
        self.assertEqual(multi_search_result['messages'][0]['match_content'],
                         '<p>今<span class="highlight">朝は</span>ごはんを食<span class="highlight">べました</span>。</p>')

        narrow = [dict(operator='search', operand='https://google.com')]
        link_search_result: Dict[str, Any] = self.get_and_check_messages(dict(
            narrow=ujson.dumps(narrow),
            anchor=next_message_id,
            num_after=10,
            num_before=0,
        ))
        self.assertEqual(len(link_search_result['messages']), 1)
        self.assertEqual(link_search_result['messages'][0]['match_content'],
                         '<p><a href="https://google.com"><span class="highlight">https://google.com</span></a></p>')

        # Search operands with HTML special characters
        special_search_narrow = [
            dict(operator='search', operand='butter'),
        ]
        special_search_result: Dict[str, Any] = self.get_and_check_messages(dict(
            narrow=ujson.dumps(special_search_narrow),
            anchor=next_message_id,
            num_after=10,
            num_before=0,
        ))
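        # The topic and body are HTML-escaped before the highlight spans are
        # inserted, so the literal '&' in these fixtures comes back as '&amp;'.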
        self.assertEqual(len(special_search_result['messages']), 1)
        self.assertEqual(special_search_result['messages'][0][MATCH_TOPIC],
                         'bread &amp; <span class="highlight">butter</span>')

        special_search_narrow = [
            dict(operator='search', operand='&'),
        ]
        special_search_result = self.get_and_check_messages(dict(
            narrow=ujson.dumps(special_search_narrow),
            anchor=next_message_id,
            num_after=10,
            num_before=0,
        ))
        self.assertEqual(len(special_search_result['messages']), 1)
        self.assertEqual(special_search_result['messages'][0][MATCH_TOPIC],
                         'bread <span class="highlight">&amp;</span> butter')
        self.assertEqual(special_search_result['messages'][0]['match_content'],
                         '<p>chalk <span class="highlight">&amp;</span> cheese</p>')

    def test_messages_in_narrow_for_non_search(self) -> None:
        user = self.example_user("cordelia")
        self.login_user(user)

        def send(content: str) -> int:
            msg_id = self.send_stream_message(
                sender=user,
                stream_name="Verona",
                topic_name='test_topic',
                content=content,
            )
            return msg_id

        good_id = send('http://foo.com')
        bad_id = send('no link here')
        msg_ids = [good_id, bad_id]
        send('http://bar.com but not in msg_ids')

        narrow = [
            dict(operator='has', operand='link'),
        ]
        raw_params = dict(msg_ids=msg_ids, narrow=narrow)
        params = {k: ujson.dumps(v) for k, v in raw_params.items()}
        result = self.client_get('/json/messages/matches_narrow', params)
        self.assert_json_success(result)
        messages = result.json()['messages']
        self.assertEqual(len(list(messages.keys())), 1)
        message = messages[str(good_id)]
        self.assertIn('a href=', message['match_content'])
        self.assertIn('http://foo.com', message['match_content'])
        self.assertEqual(message[MATCH_TOPIC], 'test_topic')

    def test_get_messages_with_only_searching_anchor(self) -> None:
        """
        Test that specifying an anchor but 0 for num_before and num_after
        returns at most 1 message.
        """
        self.login('cordelia')

        cordelia = self.example_user('cordelia')
        anchor = self.send_stream_message(cordelia, "Verona")

        narrow = [dict(operator='sender', operand=cordelia.email)]
        result: Dict[str, Any] = self.get_and_check_messages(dict(
            narrow=ujson.dumps(narrow),
            anchor=anchor, num_before=0,
            num_after=0,
        ))
        self.assertEqual(len(result['messages']), 1)

        narrow = [dict(operator='is', operand='mentioned')]
        result = self.get_and_check_messages(dict(narrow=ujson.dumps(narrow),
                                                  anchor=anchor, num_before=0,
                                                  num_after=0))
        self.assertEqual(len(result['messages']), 0)
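
    # The checks below exercise the found_anchor/found_oldest/found_newest
    # and history_limited flags returned by GET /json/messages for various
    # combinations of anchor, window size, and (mocked) history limits.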
    def test_get_visible_messages_with_anchor(self) -> None:
        def messages_matches_ids(messages: List[Dict[str, Any]], message_ids: List[int]) -> None:
            self.assertEqual(len(messages), len(message_ids))
            for message in messages:
                assert message["id"] in message_ids

        self.login('hamlet')

        Message.objects.all().delete()

        message_ids = []
        for i in range(10):
            message_ids.append(self.send_stream_message(self.example_user("cordelia"), "Verona"))

        data = self.get_messages_response(anchor=message_ids[9], num_before=9, num_after=0)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], True)
        self.assertEqual(data['found_oldest'], False)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], False)
        messages_matches_ids(messages, message_ids)
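
        # first_visible_id_as() (defined near the top of this file) patches
        # get_first_visible_message_id, simulating a realm whose visible
        # history starts at the given message ID: messages below it are
        # hidden, and history_limited is reported when they get cut off.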
        with first_visible_id_as(message_ids[5]):
            data = self.get_messages_response(anchor=message_ids[9], num_before=9, num_after=0)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], True)
        self.assertEqual(data['found_oldest'], True)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], True)
        messages_matches_ids(messages, message_ids[5:])

        with first_visible_id_as(message_ids[2]):
            data = self.get_messages_response(anchor=message_ids[6], num_before=9, num_after=0)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], True)
        self.assertEqual(data['found_oldest'], True)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], True)
        messages_matches_ids(messages, message_ids[2:7])

        with first_visible_id_as(message_ids[9] + 1):
            data = self.get_messages_response(anchor=message_ids[9], num_before=9, num_after=0)

        messages = data['messages']
        self.assert_length(messages, 0)
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], True)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], True)

        data = self.get_messages_response(anchor=message_ids[5], num_before=0, num_after=5)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], True)
        self.assertEqual(data['found_oldest'], False)
        self.assertEqual(data['found_newest'], True)
        self.assertEqual(data['history_limited'], False)
        messages_matches_ids(messages, message_ids[5:])

        with first_visible_id_as(message_ids[7]):
            data = self.get_messages_response(anchor=message_ids[5], num_before=0, num_after=5)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], False)
        self.assertEqual(data['found_newest'], True)
        self.assertEqual(data['history_limited'], False)
        messages_matches_ids(messages, message_ids[7:])

        with first_visible_id_as(message_ids[2]):
            data = self.get_messages_response(anchor=message_ids[0], num_before=0, num_after=5)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], False)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], False)
        messages_matches_ids(messages, message_ids[2:7])

        with first_visible_id_as(message_ids[9] + 1):
            data = self.get_messages_response(anchor=message_ids[0], num_before=0, num_after=5)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], False)
        self.assertEqual(data['found_newest'], True)
        self.assertEqual(data['history_limited'], False)
        self.assert_length(messages, 0)

        # Verify that with anchor=0 we always get found_oldest=True
        with first_visible_id_as(0):
            data = self.get_messages_response(anchor=0, num_before=0, num_after=5)

        messages = data['messages']
        messages_matches_ids(messages, message_ids[0:5])
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], True)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], False)

        # Verify that with anchor=-1 we always get found_oldest=True;
        # anchor=-1 is arguably invalid input, but it used to be supported.
        with first_visible_id_as(0):
            data = self.get_messages_response(anchor=-1, num_before=0, num_after=5)

        messages = data['messages']
        messages_matches_ids(messages, message_ids[0:5])
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], True)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], False)

        # And anchor='oldest' does the same thing.
        with first_visible_id_as(0):
            data = self.get_messages_response(anchor='oldest', num_before=0, num_after=5)

        messages = data['messages']
        messages_matches_ids(messages, message_ids[0:5])
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], True)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], False)
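
        # With no history limit active, the found_* flags depend only on
        # whether the requested window actually reaches either end of the
        # message history.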
        data = self.get_messages_response(anchor=message_ids[5], num_before=5, num_after=4)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], True)
        self.assertEqual(data['found_oldest'], False)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], False)
        messages_matches_ids(messages, message_ids)

        data = self.get_messages_response(anchor=message_ids[5], num_before=10, num_after=10)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], True)
        self.assertEqual(data['found_oldest'], True)
        self.assertEqual(data['found_newest'], True)
        self.assertEqual(data['history_limited'], False)
        messages_matches_ids(messages, message_ids)

        with first_visible_id_as(message_ids[5]):
            data = self.get_messages_response(anchor=message_ids[5], num_before=5, num_after=4)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], True)
        self.assertEqual(data['found_oldest'], True)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], True)
        messages_matches_ids(messages, message_ids[5:])

        with first_visible_id_as(message_ids[5]):
            data = self.get_messages_response(anchor=message_ids[2], num_before=5, num_after=3)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], True)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], True)
        messages_matches_ids(messages, message_ids[5:8])

        with first_visible_id_as(message_ids[5]):
            data = self.get_messages_response(anchor=message_ids[2], num_before=10, num_after=10)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], True)
        self.assertEqual(data['found_newest'], True)
        messages_matches_ids(messages, message_ids[5:])

        with first_visible_id_as(message_ids[9] + 1):
            data = self.get_messages_response(anchor=message_ids[5], num_before=5, num_after=4)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], True)
        self.assertEqual(data['found_newest'], True)
        self.assertEqual(data['history_limited'], True)
        self.assert_length(messages, 0)

        with first_visible_id_as(message_ids[5]):
            data = self.get_messages_response(anchor=message_ids[5], num_before=0, num_after=0)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], True)
        self.assertEqual(data['found_oldest'], False)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], False)
        messages_matches_ids(messages, message_ids[5:6])

        with first_visible_id_as(message_ids[5]):
            data = self.get_messages_response(anchor=message_ids[2], num_before=0, num_after=0)

        messages = data['messages']
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], False)
        self.assertEqual(data['found_newest'], False)
        self.assertEqual(data['history_limited'], False)
        self.assert_length(messages, 0)

        # Verify some additional behavior of found_newest.
        with first_visible_id_as(0):
            data = self.get_messages_response(anchor=LARGER_THAN_MAX_MESSAGE_ID, num_before=5, num_after=0)

        messages = data['messages']
        self.assert_length(messages, 5)
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], False)
        self.assertEqual(data['found_newest'], True)
        self.assertEqual(data['history_limited'], False)

        # The anchor value 'newest' behaves just like LARGER_THAN_MAX_MESSAGE_ID.
        with first_visible_id_as(0):
            data = self.get_messages_response(anchor='newest', num_before=5, num_after=0)

        messages = data['messages']
        self.assert_length(messages, 5)
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], False)
        self.assertEqual(data['found_newest'], True)
        self.assertEqual(data['history_limited'], False)

        with first_visible_id_as(0):
            data = self.get_messages_response(anchor=LARGER_THAN_MAX_MESSAGE_ID + 1,
                                              num_before=5, num_after=0)

        messages = data['messages']
        self.assert_length(messages, 5)
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], False)
        self.assertEqual(data['found_newest'], True)
        self.assertEqual(data['history_limited'], False)

        with first_visible_id_as(0):
            data = self.get_messages_response(anchor=LARGER_THAN_MAX_MESSAGE_ID, num_before=20, num_after=0)

        messages = data['messages']
        self.assert_length(messages, 10)
        self.assertEqual(data['found_anchor'], False)
        self.assertEqual(data['found_oldest'], True)
        self.assertEqual(data['found_newest'], True)
        self.assertEqual(data['history_limited'], False)

    def test_missing_params(self) -> None:
        """
        num_before and num_after are required parameters
        for get_messages; anchor is optional.
        """
        self.login('hamlet')

        required_args: Tuple[Tuple[str, int], ...] = (("num_before", 1), ("num_after", 1))

        for i in range(len(required_args)):
            post_params = dict(required_args[:i] + required_args[i + 1:])
            result = self.client_get("/json/messages", post_params)
            self.assert_json_error(result,
                                   f"Missing '{required_args[i][0]}' argument")

    def test_get_messages_limits(self) -> None:
        """
        A call to GET /json/messages requesting more than
        MAX_MESSAGES_PER_FETCH messages returns an error message.
        """
        self.login('hamlet')
        result = self.client_get("/json/messages", dict(anchor=1, num_before=3000, num_after=3000))
        self.assert_json_error(result, "Too many messages requested (maximum 5000).")
        result = self.client_get("/json/messages", dict(anchor=1, num_before=6000, num_after=0))
        self.assert_json_error(result, "Too many messages requested (maximum 5000).")
        result = self.client_get("/json/messages", dict(anchor=1, num_before=0, num_after=6000))
        self.assert_json_error(result, "Too many messages requested (maximum 5000).")
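        # In the first case neither parameter alone exceeds 5000, so the cap
        # evidently applies to num_before + num_after combined.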

    def test_bad_int_params(self) -> None:
        """
        num_before and num_after must be non-negative integers, or
        strings that can be converted to non-negative integers.
        """
        self.login('hamlet')

        other_params = [("narrow", {}), ("anchor", 0)]
        int_params = ["num_before", "num_after"]

        bad_types = (False, "", "-1", -1)
        for idx, param in enumerate(int_params):
            for type in bad_types:
                # Rotate through every bad type for every integer
                # parameter, one at a time.
                post_params = dict(other_params + [(param, type)] +
                                   [(other_param, 0) for other_param in
                                    int_params[:idx] + int_params[idx + 1:]],
                                   )
                result = self.client_get("/json/messages", post_params)
                self.assert_json_error(result,
                                       f"Bad value for '{param}': {type}")

    def test_bad_narrow_type(self) -> None:
        """
        narrow must be a list of string pairs.
        """
        self.login('hamlet')

        other_params: List[Tuple[str, Union[int, str, bool]]] = [("anchor", 0), ("num_before", 0), ("num_after", 0)]

        bad_types: Tuple[Union[int, str, bool], ...] = (
            False, 0, '', '{malformed json,',
            '{foo: 3}', '[1,2]', '[["x","y","z"]]',
        )
        for type in bad_types:
            post_params = dict(other_params + [("narrow", type)])
            result = self.client_get("/json/messages", post_params)
            self.assert_json_error(result,
                                   f"Bad value for 'narrow': {type}")

    def test_bad_narrow_operator(self) -> None:
        """
        Unrecognized narrow operators are rejected.
        """
        self.login('hamlet')
        for operator in ['', 'foo', 'stream:verona', '__init__']:
            narrow = [dict(operator=operator, operand='')]
            params = dict(anchor=0, num_before=0, num_after=0, narrow=ujson.dumps(narrow))
            result = self.client_get("/json/messages", params)
            self.assert_json_error_contains(result,
                                            "Invalid narrow operator: unknown operator")

    def test_invalid_narrow_operand_in_dict(self) -> None:
        self.login('hamlet')

        # str or int is required for sender, group-pm-with, stream
        invalid_operands = [['1'], [2], None]
        error_msg = 'elem["operand"] is not a string or integer'
        for operand in ['sender', 'group-pm-with', 'stream']:
            self.exercise_bad_narrow_operand_using_dict_api(operand, invalid_operands, error_msg)

        # str or int list is required for the pm-with operator
        invalid_operands = [None]
        error_msg = 'elem["operand"] is not a string or an integer list'
        self.exercise_bad_narrow_operand_using_dict_api('pm-with', invalid_operands, error_msg)

        invalid_operands = [['2']]
        error_msg = 'elem["operand"][0] is not an integer'
        self.exercise_bad_narrow_operand_using_dict_api('pm-with', invalid_operands, error_msg)

        # For others only str is acceptable
        invalid_operands = [2, None, [1]]
        error_msg = 'elem["operand"] is not a string'
        for operand in ['is', 'near', 'has', 'id']:
            self.exercise_bad_narrow_operand_using_dict_api(operand, invalid_operands, error_msg)

        # Disallow empty search terms
        error_msg = 'elem["operand"] cannot be blank.'
        self.exercise_bad_narrow_operand_using_dict_api('search', [''], error_msg)

    # The exercise_bad_narrow_operand helper method uses the legacy tuple
    # format to test bad narrows; this method uses the current dict API format.
    def exercise_bad_narrow_operand_using_dict_api(self, operator: str,
                                                   operands: Sequence[Any],
                                                   error_msg: str) -> None:
        for operand in operands:
            narrow = [dict(operator=operator, operand=operand)]
            params = dict(anchor=0, num_before=0, num_after=0, narrow=ujson.dumps(narrow))
            result = self.client_get('/json/messages', params)
            self.assert_json_error_contains(result, error_msg)

    def exercise_bad_narrow_operand(self, operator: str,
                                    operands: Sequence[Any],
                                    error_msg: str) -> None:
        other_params: List[Tuple[str, Any]] = [("anchor", 0), ("num_before", 0), ("num_after", 0)]
        for operand in operands:
            post_params = dict(other_params + [
                ("narrow", ujson.dumps([[operator, operand]]))])
            result = self.client_get("/json/messages", post_params)
            self.assert_json_error_contains(result, error_msg)

    def test_bad_narrow_stream_content(self) -> None:
        """
        If an invalid stream name is requested in get_messages, an error is
        returned.
        """
        self.login('hamlet')
        bad_stream_content: Tuple[int, List[None], List[str]] = (0, [], ["x", "y"])
        self.exercise_bad_narrow_operand("stream", bad_stream_content,
                                         "Bad value for 'narrow'")

    def test_bad_narrow_one_on_one_email_content(self) -> None:
        """
        If an invalid 'pm-with' is requested in get_messages, an
        error is returned.
        """
        self.login('hamlet')
        bad_stream_content: Tuple[int, List[None], List[str]] = (0, [], ["x", "y"])
        self.exercise_bad_narrow_operand("pm-with", bad_stream_content,
                                         "Bad value for 'narrow'")

    def test_bad_narrow_nonexistent_stream(self) -> None:
        self.login('hamlet')
        self.exercise_bad_narrow_operand("stream", ['non-existent stream'],
                                         "Invalid narrow operator: unknown stream")

        non_existing_stream_id = 1232891381239
        self.exercise_bad_narrow_operand_using_dict_api('stream', [non_existing_stream_id],
                                                        'Invalid narrow operator: unknown stream')

    def test_bad_narrow_nonexistent_email(self) -> None:
        self.login('hamlet')
        self.exercise_bad_narrow_operand("pm-with", ['non-existent-user@zulip.com'],
                                         "Invalid narrow operator: unknown user")

    def test_bad_narrow_pm_with_id_list(self) -> None:
        self.login('hamlet')
        self.exercise_bad_narrow_operand('pm-with', [-24],
                                         "Bad value for 'narrow': [[\"pm-with\",-24]]")

    def test_message_without_rendered_content(self) -> None:
        """Older messages may not have rendered_content in the database"""
        m = self.get_last_message()
        m.rendered_content = m.rendered_content_version = None
        m.content = 'test content'

        wide_dict = MessageDict.wide_dict(m)
        final_dict = MessageDict.finalize_payload(
            wide_dict,
            apply_markdown=True,
            client_gravatar=False,
        )
        self.assertEqual(final_dict['content'], '<p>test content</p>')
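        # I.e., the raw Markdown stored above comes back rendered, showing
        # that content is rendered on demand when rendered_content is absent.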

    def common_check_get_messages_query(self, query_params: Dict[str, object], expected: str) -> None:
        """Run get_messages_backend and assert that the SQL it issues for
        the /* get_messages */ query matches `expected` exactly."""
        user_profile = self.example_user('hamlet')
        request = POSTRequestMock(query_params, user_profile)
        with queries_captured() as queries:
            get_messages_backend(request, user_profile)

        for query in queries:
            if "/* get_messages */" in query['sql']:
                sql = str(query['sql']).replace(" /* get_messages */", '')
                self.assertEqual(sql, expected)
                return
        raise AssertionError("get_messages query not found")

    def test_find_first_unread_anchor(self) -> None:
        hamlet = self.example_user('hamlet')
        cordelia = self.example_user('cordelia')
        othello = self.example_user('othello')

        self.make_stream('England')

        # Send a few messages that Hamlet won't have UserMessage rows for.
        unsub_message_id = self.send_stream_message(cordelia, 'England')
        self.send_personal_message(cordelia, othello)

        self.subscribe(hamlet, 'England')

        muted_topics = [
            ['England', 'muted'],
        ]
        set_topic_mutes(hamlet, muted_topics)

        # send a muted message
        muted_message_id = self.send_stream_message(cordelia, 'England', topic_name='muted')

        # finally send Hamlet a "normal" message
        first_message_id = self.send_stream_message(cordelia, 'England')

        # send a few more messages
        extra_message_id = self.send_stream_message(cordelia, 'England')
        self.send_personal_message(cordelia, hamlet)

        sa_conn = get_sqlalchemy_connection()

        user_profile = hamlet

        anchor = find_first_unread_anchor(
            sa_conn=sa_conn,
            user_profile=user_profile,
            narrow=[],
        )
        self.assertEqual(anchor, first_message_id)
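        # Hamlet has no UserMessage row for the pre-subscribe message, and
        # the 'muted' topic is excluded, so the anchor lands on the first
        # "normal" message above.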

        # With the same data setup, we now want to test that a reasonable
        # search still gets the first message sent to Hamlet (before he
        # subscribed) and other recent messages to the stream.
        query_params = dict(
            anchor="first_unread",
            num_before=10,
            num_after=10,
            narrow='[["stream", "England"]]',
        )
        request = POSTRequestMock(query_params, user_profile)

        payload = get_messages_backend(request, user_profile)
        result = ujson.loads(payload.content)
        self.assertEqual(result['anchor'], first_message_id)
        self.assertEqual(result['found_newest'], True)
        self.assertEqual(result['found_oldest'], True)

        messages = result['messages']
        self.assertEqual(
            {msg['id'] for msg in messages},
            {unsub_message_id, muted_message_id, first_message_id, extra_message_id},
        )

    def test_use_first_unread_anchor_with_some_unread_messages(self) -> None:
        user_profile = self.example_user('hamlet')

        # Have Othello send messages to Hamlet that he hasn't read.
        # Here, Hamlet isn't subscribed to the stream Scotland.
        self.send_stream_message(self.example_user("othello"), "Scotland")
        first_unread_message_id = self.send_personal_message(
            self.example_user("othello"),
            self.example_user("hamlet"),
        )

        # Add a few messages that help us test that our query doesn't
        # look at messages that are irrelevant to Hamlet.
        self.send_personal_message(self.example_user("othello"), self.example_user("cordelia"))
        self.send_personal_message(self.example_user("othello"), self.example_user("iago"))

        query_params = dict(
            anchor="first_unread",
            num_before=10,
            num_after=10,
            narrow='[]',
        )
        request = POSTRequestMock(query_params, user_profile)
        with queries_captured() as all_queries:
            get_messages_backend(request, user_profile)

        # Verify the query for old messages looks correct.
        queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
        self.assertEqual(len(queries), 1)
        sql = queries[0]['sql']
        self.assertNotIn(f'AND message_id = {LARGER_THAN_MAX_MESSAGE_ID}', sql)
        self.assertIn('ORDER BY message_id ASC', sql)
        cond = f'WHERE user_profile_id = {user_profile.id} AND message_id >= {first_unread_message_id}'
        self.assertIn(cond, sql)
        cond = f'WHERE user_profile_id = {user_profile.id} AND message_id <= {first_unread_message_id - 1}'
        self.assertIn(cond, sql)
        self.assertIn('UNION', sql)
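        # The UNION above joins the two halves of the anchor query: rows at
        # or after the first unread message (the num_after side) and rows
        # strictly before it (the num_before side).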

    def test_visible_messages_use_first_unread_anchor_with_some_unread_messages(self) -> None:
        user_profile = self.example_user('hamlet')
        # Have Othello send messages to Hamlet that he hasn't read.
        self.subscribe(self.example_user("hamlet"), 'Scotland')

        first_unread_message_id = self.send_stream_message(self.example_user("othello"), "Scotland")
        self.send_stream_message(self.example_user("othello"), "Scotland")
        self.send_stream_message(self.example_user("othello"), "Scotland")
        self.send_personal_message(
            self.example_user("othello"),
            self.example_user("hamlet"),
        )

        # Add a few messages that help us test that our query doesn't
        # look at messages that are irrelevant to Hamlet.
        self.send_personal_message(self.example_user("othello"), self.example_user("cordelia"))
        self.send_personal_message(self.example_user("othello"), self.example_user("iago"))

        query_params = dict(
            anchor="first_unread",
            num_before=10,
            num_after=10,
            narrow='[]',
        )
        request = POSTRequestMock(query_params, user_profile)
        first_visible_message_id = first_unread_message_id + 2
        with first_visible_id_as(first_visible_message_id):
            with queries_captured() as all_queries:
                get_messages_backend(request, user_profile)

        queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
        self.assertEqual(len(queries), 1)
        sql = queries[0]['sql']
        self.assertNotIn(f'AND message_id = {LARGER_THAN_MAX_MESSAGE_ID}', sql)
        self.assertIn('ORDER BY message_id ASC', sql)
        cond = f'WHERE user_profile_id = {user_profile.id} AND message_id <= {first_unread_message_id - 1}'
        self.assertIn(cond, sql)
        cond = f'WHERE user_profile_id = {user_profile.id} AND message_id >= {first_visible_message_id}'
        self.assertIn(cond, sql)
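        # With a visibility cutoff in effect, the lower bound of the newer
        # half comes from first_visible_message_id, which here hides the
        # first unread message.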

    def test_use_first_unread_anchor_with_no_unread_messages(self) -> None:
        user_profile = self.example_user('hamlet')

        query_params = dict(
            anchor="first_unread",
            num_before=10,
            num_after=10,
            narrow='[]',
        )
        request = POSTRequestMock(query_params, user_profile)
        with queries_captured() as all_queries:
            get_messages_backend(request, user_profile)

        queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
        self.assertEqual(len(queries), 1)
        sql = queries[0]['sql']
        self.assertNotIn('AND message_id <=', sql)
        self.assertNotIn('AND message_id >=', sql)

        first_visible_message_id = 5
        with first_visible_id_as(first_visible_message_id):
            with queries_captured() as all_queries:
                get_messages_backend(request, user_profile)
        queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
        sql = queries[0]['sql']
        self.assertNotIn('AND message_id <=', sql)
        self.assertNotIn('AND message_id >=', sql)

    def test_use_first_unread_anchor_with_muted_topics(self) -> None:
        """
        Test that our logic related to `use_first_unread_anchor`
        invokes the `message_id = LARGER_THAN_MAX_MESSAGE_ID` hack for
        the `/* get_messages */` query when relevant muting
        is in effect.

        This is a very arcane test of arcane, but very heavily
        field-tested, logic in get_messages_backend().  If
        this test breaks, be absolutely sure you know what you're
        doing.
        """
        realm = get_realm('zulip')
        self.make_stream('web stuff')
        self.make_stream('bogus')
        user_profile = self.example_user('hamlet')
        muted_topics = [
            ['Scotland', 'golf'],
            ['web stuff', 'css'],
            ['bogus', 'bogus'],
        ]
        set_topic_mutes(user_profile, muted_topics)

        query_params = dict(
            anchor="first_unread",
            num_before=0,
            num_after=0,
            narrow='[["stream", "Scotland"]]',
        )
        request = POSTRequestMock(query_params, user_profile)
        with queries_captured() as all_queries:
            get_messages_backend(request, user_profile)

        # Do some tests on the main query, to verify the muting logic
        # runs on this code path.
        queries = [q for q in all_queries if str(q['sql']).startswith("SELECT message_id, flags")]
        self.assertEqual(len(queries), 1)

        stream = get_stream('Scotland', realm)
        recipient_id = stream.recipient.id
        cond = f"AND NOT (recipient_id = {recipient_id} AND upper(subject) = upper('golf'))"
        self.assertIn(cond, queries[0]['sql'])

        # Next, verify the use_first_unread_anchor setting invokes
        # the `message_id = LARGER_THAN_MAX_MESSAGE_ID` hack.
        queries = [q for q in all_queries if '/* get_messages */' in q['sql']]
        self.assertEqual(len(queries), 1)
        self.assertIn(f'AND zerver_message.id = {LARGER_THAN_MAX_MESSAGE_ID}',
                      queries[0]['sql'])
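        # In short: with every unread message in the narrow muted, there is
        # no usable first-unread anchor, so the backend falls back to the
        # impossible `zerver_message.id = LARGER_THAN_MAX_MESSAGE_ID` match.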

    def test_exclude_muting_conditions(self) -> None:
        realm = get_realm('zulip')
        self.make_stream('web stuff')
        user_profile = self.example_user('hamlet')

        self.make_stream('irrelevant_stream')

        # Test the do-nothing case first.
        muted_topics = [
            ['irrelevant_stream', 'irrelevant_topic'],
        ]
        set_topic_mutes(user_profile, muted_topics)

        # If nothing relevant is muted, then exclude_muting_conditions()
        # should return an empty list.
        narrow = [
            dict(operator='stream', operand='Scotland'),
        ]
        muting_conditions = exclude_muting_conditions(user_profile, narrow)
        self.assertEqual(muting_conditions, [])

        # Also test that passing a stream ID works
        narrow = [
            dict(operator='stream', operand=get_stream('Scotland', realm).id),
        ]
        muting_conditions = exclude_muting_conditions(user_profile, narrow)
        self.assertEqual(muting_conditions, [])

        # Ok, now set up our muted topics to include a topic relevant to our narrow.
        muted_topics = [
            ['Scotland', 'golf'],
            ['web stuff', 'css'],
        ]
        set_topic_mutes(user_profile, muted_topics)

        # And verify that our query will exclude them.
        narrow = [
            dict(operator='stream', operand='Scotland'),
        ]
        muting_conditions = exclude_muting_conditions(user_profile, narrow)
        query = select([column("id").label("message_id")], None, table("zerver_message"))
        query = query.where(*muting_conditions)
        expected_query = '''\
SELECT id AS message_id \n\
FROM zerver_message \n\
WHERE NOT (recipient_id = %(recipient_id_1)s AND upper(subject) = upper(%(param_1)s))\
'''
        self.assertEqual(get_sqlalchemy_sql(query), expected_query)
        params = get_sqlalchemy_query_params(query)

        self.assertEqual(params['recipient_id_1'], get_recipient_id_for_stream_name(realm, 'Scotland'))
        self.assertEqual(params['param_1'], 'golf')
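        # Topic muting compares upper(subject) = upper(...), i.e. muted-topic
        # matching is case-insensitive.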
        mute_stream(realm, user_profile, 'Verona')

        # Using a bogus stream name should be similar to using no narrow at
        # all, and we'll exclude all mutes.
        narrow = [
            dict(operator='stream', operand='bogus-stream-name'),
        ]
        muting_conditions = exclude_muting_conditions(user_profile, narrow)
        query = select([column("id")], None, table("zerver_message"))
        query = query.where(and_(*muting_conditions))
        expected_query = '''\
SELECT id \n\
FROM zerver_message \n\
WHERE recipient_id NOT IN (%(recipient_id_1)s) \
AND NOT \
(recipient_id = %(recipient_id_2)s AND upper(subject) = upper(%(param_1)s) OR \
recipient_id = %(recipient_id_3)s AND upper(subject) = upper(%(param_2)s))\
'''
        self.assertEqual(get_sqlalchemy_sql(query), expected_query)
        params = get_sqlalchemy_query_params(query)
        self.assertEqual(params['recipient_id_1'], get_recipient_id_for_stream_name(realm, 'Verona'))
        self.assertEqual(params['recipient_id_2'], get_recipient_id_for_stream_name(realm, 'Scotland'))
        self.assertEqual(params['param_1'], 'golf')
        self.assertEqual(params['recipient_id_3'], get_recipient_id_for_stream_name(realm, 'web stuff'))
        self.assertEqual(params['param_2'], 'css')
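        # Note how the two kinds of mutes compose: the muted stream becomes a
        # recipient_id NOT IN (...) clause, while each muted topic contributes
        # a recipient/topic pair under a single NOT (... OR ...).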

    def test_get_messages_queries(self) -> None:
        query_ids = self.get_query_ids()

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id = 0) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 0}, sql)

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id = 0) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 1, 'num_after': 0}, sql)

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} ORDER BY message_id ASC \n LIMIT 2) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 1}, sql)

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} ORDER BY message_id ASC \n LIMIT 11) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 10}, sql)

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id <= 100 ORDER BY message_id DESC \n LIMIT 11) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 100, 'num_before': 10, 'num_after': 0}, sql)

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM ((SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id <= 99 ORDER BY message_id DESC \n LIMIT 10) UNION ALL (SELECT message_id, flags \nFROM zerver_usermessage \nWHERE user_profile_id = {hamlet_id} AND message_id >= 100 ORDER BY message_id ASC \n LIMIT 11)) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 100, 'num_before': 10, 'num_after': 10}, sql)
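        # Note the limits in the last query: the "before" half is capped at
        # num_before rows below the anchor, while the "after" half fetches
        # num_after + 1 rows so the anchor row itself can be included.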

    def test_get_messages_with_narrow_queries(self) -> None:
        query_ids = self.get_query_ids()
        hamlet_email = self.example_user('hamlet').email
        othello_email = self.example_user('othello').email

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (sender_id = {othello_id} AND recipient_id = {hamlet_recipient} OR sender_id = {hamlet_id} AND recipient_id = {othello_recipient}) AND message_id = 0) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 0,
                                              'narrow': f'[["pm-with", "{othello_email}"]]'},
                                             sql)

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (sender_id = {othello_id} AND recipient_id = {hamlet_recipient} OR sender_id = {hamlet_id} AND recipient_id = {othello_recipient}) AND message_id = 0) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 1, 'num_after': 0,
                                              'narrow': f'[["pm-with", "{othello_email}"]]'},
                                             sql)

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (sender_id = {othello_id} AND recipient_id = {hamlet_recipient} OR sender_id = {hamlet_id} AND recipient_id = {othello_recipient}) ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': f'[["pm-with", "{othello_email}"]]'},
                                             sql)

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND (flags & 2) != 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': '[["is", "starred"]]'},
                                             sql)

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND sender_id = {othello_id} ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': f'[["sender", "{othello_email}"]]'},
                                             sql)

        sql_template = 'SELECT anon_1.message_id \nFROM (SELECT id AS message_id \nFROM zerver_message \nWHERE recipient_id = {scotland_recipient} ORDER BY zerver_message.id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': '[["stream", "Scotland"]]'},
                                             sql)

        sql_template = 'SELECT anon_1.message_id \nFROM (SELECT id AS message_id \nFROM zerver_message \nWHERE recipient_id IN ({public_streams_recipents}) ORDER BY zerver_message.id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': '[["streams", "public"]]'},
                                             sql)

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND recipient_id NOT IN ({public_streams_recipents}) ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': '[{"operator": "streams", "operand": "public", "negated": true}]'},
                                             sql)
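        # Note the pattern above: narrows that may include stream history
        # (stream, streams:public) select from zerver_message directly, while
        # other narrows join through the user's own zerver_usermessage rows.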
sql_template = " SELECT anon_1.message_id, anon_1.flags \n FROM (SELECT message_id, flags \n FROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \n WHERE user_profile_id = {hamlet_id} AND upper(subject) = upper( ' blah ' ) ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC "
2016-06-21 21:05:44 +02:00
sql = sql_template . format ( * * query_ids )
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': '[["topic", "blah"]]'},
                                             sql)

        sql_template = "SELECT anon_1.message_id \nFROM (SELECT id AS message_id \nFROM zerver_message \nWHERE recipient_id = {scotland_recipient} AND upper(subject) = upper('blah') ORDER BY zerver_message.id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC"
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': '[["stream", "Scotland"], ["topic", "blah"]]'},
                                             sql)

        # Narrow to pms with yourself
        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND sender_id = {hamlet_id} AND recipient_id = {hamlet_recipient} ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': f'[["pm-with", "{hamlet_email}"]]'},
                                             sql)

        sql_template = 'SELECT anon_1.message_id, anon_1.flags \nFROM (SELECT message_id, flags \nFROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \nWHERE user_profile_id = {hamlet_id} AND recipient_id = {scotland_recipient} AND (flags & 2) != 0 ORDER BY message_id ASC \n LIMIT 10) AS anon_1 ORDER BY message_id ASC'
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': '[["stream", "Scotland"], ["is", "starred"]]'},
                                             sql)

    @override_settings(USING_PGROONGA=False)
    def test_get_messages_with_search_queries(self) -> None:
        query_ids = self.get_query_ids()
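
        # Full-text search filters on the search_tsvector column; the
        # ts_headline()/unnest() machinery in the expected SQL computes the
        # character offsets of each <ts-match> highlight in the rendered
        # content and the (HTML-escaped) topic.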
sql_template = """ \
SELECT anon_1 . message_id , anon_1 . flags , anon_1 . subject , anon_1 . rendered_content , anon_1 . content_matches , anon_1 . topic_matches \n \
FROM ( SELECT message_id , flags , subject , rendered_content , array ( ( SELECT ARRAY [ sum ( length ( anon_3 ) - 11 ) OVER ( ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING ) + 11 , strpos ( anon_3 , ' </ts-match> ' ) - 1 ] AS anon_2 \n \
FROM unnest ( string_to_array ( ts_headline ( ' zulip.english_us_search ' , rendered_content , plainto_tsquery ( ' zulip.english_us_search ' , ' jumping ' ) , ' HighlightAll = TRUE, StartSel = <ts-match>, StopSel = </ts-match> ' ) , ' <ts-match> ' ) ) AS anon_3 \n \
LIMIT ALL OFFSET 1 ) ) AS content_matches , array ( ( SELECT ARRAY [ sum ( length ( anon_5 ) - 11 ) OVER ( ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING ) + 11 , strpos ( anon_5 , ' </ts-match> ' ) - 1 ] AS anon_4 \n \
FROM unnest ( string_to_array ( ts_headline ( ' zulip.english_us_search ' , escape_html ( subject ) , plainto_tsquery ( ' zulip.english_us_search ' , ' jumping ' ) , ' HighlightAll = TRUE, StartSel = <ts-match>, StopSel = </ts-match> ' ) , ' <ts-match> ' ) ) AS anon_5 \n \
LIMIT ALL OFFSET 1 ) ) AS topic_matches \n \
FROM zerver_usermessage JOIN zerver_message ON zerver_usermessage . message_id = zerver_message . id \n \
WHERE user_profile_id = { hamlet_id } AND ( search_tsvector @ @ plainto_tsquery ( ' zulip.english_us_search ' , ' jumping ' ) ) ORDER BY message_id ASC \n \
LIMIT 10 ) AS anon_1 ORDER BY message_id ASC \
"""
2016-06-21 21:05:44 +02:00
sql = sql_template . format ( * * query_ids )
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': '[["search", "jumping"]]'},
                                             sql)
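
        # With a stream narrow, message history can be included, so the
        # expected query reads zerver_message directly: there is no
        # zerver_usermessage join and hence no flags column in the result.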
        sql_template = """\
SELECT anon_1.message_id, anon_1.subject, anon_1.rendered_content, anon_1.content_matches, anon_1.topic_matches \n\
FROM (SELECT id AS message_id, subject, rendered_content, array((SELECT ARRAY[sum(length(anon_3) - 11) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) + 11, strpos(anon_3, '</ts-match>') - 1] AS anon_2 \n\
FROM unnest(string_to_array(ts_headline('zulip.english_us_search', rendered_content, plainto_tsquery('zulip.english_us_search', 'jumping'), 'HighlightAll = TRUE, StartSel = <ts-match>, StopSel = </ts-match>'), '<ts-match>')) AS anon_3 \n\
 LIMIT ALL OFFSET 1)) AS content_matches, array((SELECT ARRAY[sum(length(anon_5) - 11) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) + 11, strpos(anon_5, '</ts-match>') - 1] AS anon_4 \n\
FROM unnest(string_to_array(ts_headline('zulip.english_us_search', escape_html(subject), plainto_tsquery('zulip.english_us_search', 'jumping'), 'HighlightAll = TRUE, StartSel = <ts-match>, StopSel = </ts-match>'), '<ts-match>')) AS anon_5 \n\
 LIMIT ALL OFFSET 1)) AS topic_matches \n\
FROM zerver_message \n\
WHERE recipient_id = {scotland_recipient} AND (search_tsvector @@ plainto_tsquery('zulip.english_us_search', 'jumping')) ORDER BY zerver_message.id ASC \n\
 LIMIT 10) AS anon_1 ORDER BY message_id ASC\
"""
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': '[["stream", "Scotland"], ["search", "jumping"]]'},
                                             sql)
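
        # A quoted search term ("jumping") additionally requires an exact
        # substring match, which shows up as ILIKE clauses alongside the
        # tsquery condition in the expected SQL.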
        sql_template = """\
SELECT anon_1.message_id, anon_1.flags, anon_1.subject, anon_1.rendered_content, anon_1.content_matches, anon_1.topic_matches \n\
FROM (SELECT message_id, flags, subject, rendered_content, array((SELECT ARRAY[sum(length(anon_3) - 11) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) + 11, strpos(anon_3, '</ts-match>') - 1] AS anon_2 \n\
FROM unnest(string_to_array(ts_headline('zulip.english_us_search', rendered_content, plainto_tsquery('zulip.english_us_search', '"jumping" quickly'), 'HighlightAll = TRUE, StartSel = <ts-match>, StopSel = </ts-match>'), '<ts-match>')) AS anon_3 \n\
 LIMIT ALL OFFSET 1)) AS content_matches, array((SELECT ARRAY[sum(length(anon_5) - 11) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING) + 11, strpos(anon_5, '</ts-match>') - 1] AS anon_4 \n\
FROM unnest(string_to_array(ts_headline('zulip.english_us_search', escape_html(subject), plainto_tsquery('zulip.english_us_search', '"jumping" quickly'), 'HighlightAll = TRUE, StartSel = <ts-match>, StopSel = </ts-match>'), '<ts-match>')) AS anon_5 \n\
 LIMIT ALL OFFSET 1)) AS topic_matches \n\
FROM zerver_usermessage JOIN zerver_message ON zerver_usermessage.message_id = zerver_message.id \n\
WHERE user_profile_id = {hamlet_id} AND (content ILIKE '%jumping%' OR subject ILIKE '%jumping%') AND (search_tsvector @@ plainto_tsquery('zulip.english_us_search', '"jumping" quickly')) ORDER BY message_id ASC \n\
 LIMIT 10) AS anon_1 ORDER BY message_id ASC\
"""
        sql = sql_template.format(**query_ids)
        self.common_check_get_messages_query({'anchor': 0, 'num_before': 0, 'num_after': 9,
                                              'narrow': '[["search", "\\"jumping\\" quickly"]]'},
                                             sql)

    @override_settings(USING_PGROONGA=False)
    def test_get_messages_with_search_using_email(self) -> None:
        self.login('cordelia')

        othello = self.example_user('othello')
        cordelia = self.example_user('cordelia')

        messages_to_search = [
            ('say hello', 'How are you doing, @**Othello, the Moor of Venice**?'),
            ('lunch plans', 'I am hungry!'),
        ]
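
        # Record where the new messages will start, so the fetches below
        # can anchor past everything that already exists.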
        next_message_id = self.get_last_message().id + 1
        for topic, content in messages_to_search:
            self.send_stream_message(
                sender=cordelia,
                stream_name="Verona",
                content=content,
                topic_name=topic,
            )
        self._update_tsvector_index()
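
        # Searching for the full email address should find nothing: the
        # mention is rendered with Othello's name, not his email, so only
        # the plain-name search further below matches.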
        narrow = [
            dict(operator='sender', operand=cordelia.email),
            dict(operator='search', operand=othello.email),
        ]
        result: Dict[str, Any] = self.get_and_check_messages(dict(
            narrow=ujson.dumps(narrow),
            anchor=next_message_id,
            num_after=10,
        ))
        self.assertEqual(len(result['messages']), 0)

        narrow = [
            dict(operator='sender', operand=cordelia.email),
            dict(operator='search', operand='othello'),
        ]
        result = self.get_and_check_messages(dict(
            narrow=ujson.dumps(narrow),
            anchor=next_message_id,
            num_after=10,
        ))
        self.assertEqual(len(result['messages']), 1)
        messages = result['messages']

        (hello_message,) = [
            m for m in messages
            if m[TOPIC_NAME] == 'say hello'
        ]
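
        # The match_* fields carry the search highlighting markup
        # (<span class="highlight">) for the topic and rendered content.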
        self.assertEqual(
            hello_message[MATCH_TOPIC],
            'say hello')
        self.assertEqual(
            hello_message['match_content'],
            f'<p>How are you doing, <span class="user-mention" data-user-id="{othello.id}">'
            '@<span class="highlight">Othello</span>, the Moor of Venice</span>?</p>',
        )