from __future__ import absolute_import

from django.utils.translation import ugettext as _
from django.utils.timezone import now
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection
from django.db.models import Q
from django.http import HttpRequest, HttpResponse
from typing import Dict, List, Set, Text
from typing import Any, AnyStr, Callable, Iterable, Optional, Tuple, Union
from zerver.lib.str_utils import force_bytes, force_text
from zerver.lib.html_diff import highlight_html_differences

from zerver.decorator import authenticated_api_view, authenticated_json_post_view, \
    has_request_variables, REQ, JsonableError, \
    to_non_negative_int
from django.utils.html import escape as escape_html
from zerver.lib import bugdown
from zerver.lib.actions import recipient_for_emails, do_update_message_flags, \
    compute_mit_user_fullname, compute_irc_user_fullname, compute_jabber_user_fullname, \
    create_mirror_user_if_needed, check_send_message, do_update_message, \
    extract_recipients, truncate_body, render_incoming_message
from zerver.lib.queue import queue_json_publish
from zerver.lib.cache import (
    generic_bulk_cached_fetch,
    to_dict_cache_key_id,
)
from zerver.lib.message import (
    access_message,
    MessageDict,
    extract_message_dict,
    render_markdown,
    stringify_message_dict,
)
from zerver.lib.response import json_success, json_error
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.utils import statsd
from zerver.lib.validator import \
    check_list, check_int, check_dict, check_string, check_bool
from zerver.models import Message, UserProfile, Stream, Subscription, \
    Realm, RealmAlias, Recipient, UserMessage, bulk_get_recipients, get_recipient, \
    get_user_profile_by_email, get_stream, \
    parse_usermessage_flags, \
    email_to_domain, get_realm, get_active_streams, \
    bulk_get_streams, get_user_profile_by_id

from sqlalchemy import func
from sqlalchemy.sql import select, join, column, literal_column, literal, and_, \
    or_, not_, union_all, alias, Selectable, Select, ColumnElement, table

import re
import ujson
import datetime

from six.moves import map
import six

LARGER_THAN_MAX_MESSAGE_ID = 10000000000000000

class BadNarrowOperator(JsonableError):
    def __init__(self, desc, status_code=400):
        # type: (str, int) -> None
        self.desc = desc
        self.status_code = status_code

    def to_json_error_msg(self):
        # type: () -> str
        return _('Invalid narrow operator: {}').format(self.desc)

Query = Any # TODO: Should be Select, but sqlalchemy stubs are busted
ConditionTransform = Any # TODO: should be Callable[[ColumnElement], ColumnElement], but sqlalchemy stubs are busted

# When you add a new operator to this, also update zerver/lib/narrow.py
class NarrowBuilder(object):
    def __init__(self, user_profile, msg_id_column):
        # type: (UserProfile, str) -> None
        self.user_profile = user_profile
        self.msg_id_column = msg_id_column

    def add_term(self, query, term):
        # type: (Query, Dict[str, Any]) -> Query

        # We have to be careful here because we're letting users call a method
        # by name! The prefix 'by_' prevents it from colliding with builtin
        # Python __magic__ stuff.
        operator = term['operator']
        operand = term['operand']

        negated = term.get('negated', False)

        method_name = 'by_' + operator.replace('-', '_')
        method = getattr(self, method_name, None)
        if method is None:
            raise BadNarrowOperator('unknown operator ' + operator)

        if negated:
            maybe_negate = not_
        else:
            maybe_negate = lambda cond: cond

        return method(query, operand, maybe_negate)
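
    # Example (illustrative): a term like
    #     {'operator': 'stream', 'operand': 'Denmark', 'negated': True}
    # dispatches to by_stream(), and maybe_negate wraps the resulting
    # SQL condition in not_().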

    def by_has(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        if operand not in ['attachment', 'image', 'link']:
            raise BadNarrowOperator("unknown 'has' operand " + operand)
        col_name = 'has_' + operand
        cond = column(col_name)
        return query.where(maybe_negate(cond))

    def by_in(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        if operand == 'home':
            conditions = exclude_muting_conditions(self.user_profile, [])
            return query.where(and_(*conditions))
        elif operand == 'all':
            return query

        raise BadNarrowOperator("unknown 'in' operand " + operand)

    def by_is(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        if operand == 'private':
            query = query.select_from(join(query.froms[0], table("zerver_recipient"),
                                           column("recipient_id") ==
                                           literal_column("zerver_recipient.id")))
            cond = or_(column("type") == Recipient.PERSONAL,
                       column("type") == Recipient.HUDDLE)
            return query.where(maybe_negate(cond))
        elif operand == 'starred':
            cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
            return query.where(maybe_negate(cond))
        elif operand == 'mentioned' or operand == 'alerted':
            cond = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
            return query.where(maybe_negate(cond))
        raise BadNarrowOperator("unknown 'is' operand " + operand)

    _alphanum = frozenset(
        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')

    def _pg_re_escape(self, pattern):
        # type: (Text) -> Text
        """
        Escape user input to place in a regex

        Python's re.escape escapes unicode characters in a way which postgres
        fails on, u'\u03bb' to u'\\\u03bb'. This function will correctly escape
        them for postgres, u'\u03bb' to u'\\u03bb'.
        """
        s = list(pattern)
        for i, c in enumerate(s):
            if c not in self._alphanum:
                if c == '\000':
                    s[i] = '\\000'
                elif ord(c) >= 128:
                    # convert the character to hex postgres regex will take
                    # \uXXXX
                    s[i] = '\\u{:0>4x}'.format(ord(c))
                else:
                    s[i] = '\\' + c
        return ''.join(s)
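
    # Example (illustrative): self._pg_re_escape(u'a.b\u03bb') returns
    # u'a\\.b\\u03bb', i.e. the regex source string a\.b\u03bb.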

    def by_stream(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        stream = get_stream(operand, self.user_profile.realm)
        if stream is None:
            raise BadNarrowOperator('unknown stream ' + operand)

        if self.user_profile.realm.is_zephyr_mirror_realm:
            # MIT users expect narrowing to "social" to also show messages to /^(un)*social(.d)*$/
            # (unsocial, ununsocial, social.d, etc)
            m = re.search(r'^(?:un)*(.+?)(?:\.d)*$', stream.name, re.IGNORECASE)
            if m:
                base_stream_name = m.group(1)
            else:
                base_stream_name = stream.name

            matching_streams = get_active_streams(self.user_profile.realm).filter(
                name__iregex=r'^(un)*%s(\.d)*$' % (self._pg_re_escape(base_stream_name),))
            matching_stream_ids = [matching_stream.id for matching_stream in matching_streams]
            recipients_map = bulk_get_recipients(Recipient.STREAM, matching_stream_ids)
            cond = column("recipient_id").in_([recipient.id for recipient in recipients_map.values()])
            return query.where(maybe_negate(cond))

        recipient = get_recipient(Recipient.STREAM, type_id=stream.id)
        cond = column("recipient_id") == recipient.id
        return query.where(maybe_negate(cond))

    def by_topic(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        if self.user_profile.realm.is_zephyr_mirror_realm:
            # MIT users expect narrowing to topic "foo" to also show messages to /^foo(.d)*$/
            # (foo, foo.d, foo.d.d, etc)
            m = re.search(r'^(.*?)(?:\.d)*$', operand, re.IGNORECASE)
            if m:
                base_topic = m.group(1)
            else:
                base_topic = operand

            # Additionally, MIT users expect the empty instance and
            # instance "personal" to be the same.
            if base_topic in ('', 'personal', '(instance "")'):
                cond = or_(
                    func.upper(column("subject")) == func.upper(literal("")),
                    func.upper(column("subject")) == func.upper(literal(".d")),
                    func.upper(column("subject")) == func.upper(literal(".d.d")),
                    func.upper(column("subject")) == func.upper(literal(".d.d.d")),
                    func.upper(column("subject")) == func.upper(literal(".d.d.d.d")),
                    func.upper(column("subject")) == func.upper(literal("personal")),
                    func.upper(column("subject")) == func.upper(literal("personal.d")),
                    func.upper(column("subject")) == func.upper(literal("personal.d.d")),
                    func.upper(column("subject")) == func.upper(literal("personal.d.d.d")),
                    func.upper(column("subject")) == func.upper(literal("personal.d.d.d.d")),
                    func.upper(column("subject")) == func.upper(literal('(instance "")')),
                    func.upper(column("subject")) == func.upper(literal('(instance "").d')),
                    func.upper(column("subject")) == func.upper(literal('(instance "").d.d')),
                    func.upper(column("subject")) == func.upper(literal('(instance "").d.d.d')),
                    func.upper(column("subject")) == func.upper(literal('(instance "").d.d.d.d')),
                )
            else:
                # We limit `.d` counts, since postgres has much better
                # query planning for this than they do for a regular
                # expression (which would sometimes table scan).
                cond = or_(
                    func.upper(column("subject")) == func.upper(literal(base_topic)),
                    func.upper(column("subject")) == func.upper(literal(base_topic + ".d")),
                    func.upper(column("subject")) == func.upper(literal(base_topic + ".d.d")),
                    func.upper(column("subject")) == func.upper(literal(base_topic + ".d.d.d")),
                    func.upper(column("subject")) == func.upper(literal(base_topic + ".d.d.d.d")),
                )
            return query.where(maybe_negate(cond))

        cond = func.upper(column("subject")) == func.upper(literal(operand))
        return query.where(maybe_negate(cond))

    def by_sender(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        try:
            sender = get_user_profile_by_email(operand)
        except UserProfile.DoesNotExist:
            raise BadNarrowOperator('unknown user ' + operand)

        cond = column("sender_id") == literal(sender.id)
        return query.where(maybe_negate(cond))

    def by_near(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        return query

    def by_id(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        cond = self.msg_id_column == literal(operand)
        return query.where(maybe_negate(cond))

    def by_pm_with(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        if ',' in operand:
            # Huddle
            try:
                emails = [e.strip() for e in operand.split(',')]
                recipient = recipient_for_emails(emails, False,
                                                 self.user_profile, self.user_profile)
            except ValidationError:
                raise BadNarrowOperator('unknown recipient ' + operand)
            cond = column("recipient_id") == recipient.id
            return query.where(maybe_negate(cond))
        else:
            # Personal message
            self_recipient = get_recipient(Recipient.PERSONAL, type_id=self.user_profile.id)
            if operand == self.user_profile.email:
                # Personals with self
                cond = and_(column("sender_id") == self.user_profile.id,
                            column("recipient_id") == self_recipient.id)
                return query.where(maybe_negate(cond))

            # Personals with other user; include both directions.
            try:
                narrow_profile = get_user_profile_by_email(operand)
            except UserProfile.DoesNotExist:
                raise BadNarrowOperator('unknown user ' + operand)

            narrow_recipient = get_recipient(Recipient.PERSONAL, narrow_profile.id)
            cond = or_(and_(column("sender_id") == narrow_profile.id,
                            column("recipient_id") == self_recipient.id),
                       and_(column("sender_id") == self.user_profile.id,
                            column("recipient_id") == narrow_recipient.id))
            return query.where(maybe_negate(cond))

    def by_search(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        if settings.USING_PGROONGA:
            return self._by_search_pgroonga(query, operand, maybe_negate)
        else:
            return self._by_search_tsearch(query, operand, maybe_negate)

    def _by_search_pgroonga(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        match_positions_byte = func.pgroonga.match_positions_byte
        query_extract_keywords = func.pgroonga.query_extract_keywords
        keywords = query_extract_keywords(operand)
        query = query.column(match_positions_byte(column("rendered_content"),
                                                  keywords).label("content_matches"))
        query = query.column(match_positions_byte(column("subject"),
                                                  keywords).label("subject_matches"))
        condition = column("search_pgroonga").op("@@")(operand)
        return query.where(maybe_negate(condition))

    def _by_search_tsearch(self, query, operand, maybe_negate):
        # type: (Query, str, ConditionTransform) -> Query
        tsquery = func.plainto_tsquery(literal("zulip.english_us_search"), literal(operand))
        ts_locs_array = func.ts_match_locs_array
        query = query.column(ts_locs_array(literal("zulip.english_us_search"),
                                           column("rendered_content"),
                                           tsquery).label("content_matches"))
        # We HTML-escape the subject in Postgres to avoid doing a server round-trip
        query = query.column(ts_locs_array(literal("zulip.english_us_search"),
                                           func.escape_html(column("subject")),
                                           tsquery).label("subject_matches"))

        # Do quoted string matching. We really want phrase
        # search here so we can ignore punctuation and do
        # stemming, but there isn't a standard phrase search
        # mechanism in Postgres
        for term in re.findall(r'"[^"]+"|\S+', operand):
            if term[0] == '"' and term[-1] == '"':
                term = term[1:-1]
                term = '%' + connection.ops.prep_for_like_query(term) + '%'
                cond = or_(column("content").ilike(term),
                           column("subject").ilike(term))
                query = query.where(maybe_negate(cond))

        cond = column("search_tsvector").op("@@")(tsquery)
        return query.where(maybe_negate(cond))

# Apparently, the offsets we get from tsearch_extras are counted in
# unicode characters, not in bytes, so we do our processing with text,
# not bytes.
def highlight_string_text_offsets(text, locs):
    # type: (AnyStr, Iterable[Tuple[int, int]]) -> Text
    string = force_text(text)
    highlight_start = u'<span class="highlight">'
    highlight_stop = u'</span>'
    pos = 0
    result = u''
    for loc in locs:
        (offset, length) = loc
        result += string[pos:offset]
        result += highlight_start
        result += string[offset:offset + length]
        result += highlight_stop
        pos = offset + length
    result += string[pos:]
    return result

def highlight_string_bytes_offsets(text, locs):
    # type: (AnyStr, Iterable[Tuple[int, int]]) -> Text
    string = force_bytes(text)
    highlight_start = b'<span class="highlight">'
    highlight_stop = b'</span>'
    pos = 0
    result = b''
    for loc in locs:
        (offset, length) = loc
        result += string[pos:offset]
        result += highlight_start
        result += string[offset:offset + length]
        result += highlight_stop
        pos = offset + length
    result += string[pos:]
    return force_text(result)

def highlight_string(text, locs):
    # type: (AnyStr, Iterable[Tuple[int, int]]) -> Text
    if settings.USING_PGROONGA:
        return highlight_string_bytes_offsets(text, locs)
    else:
        return highlight_string_text_offsets(text, locs)
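
# Example (illustrative): highlight_string(u'foo bar', [(4, 3)]) returns
# u'foo <span class="highlight">bar</span>'.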

def get_search_fields(rendered_content, subject, content_matches, subject_matches):
    # type: (Text, Text, Iterable[Tuple[int, int]], Iterable[Tuple[int, int]]) -> Dict[str, Text]
    return dict(match_content=highlight_string(rendered_content, content_matches),
                match_subject=highlight_string(escape_html(subject), subject_matches))

def narrow_parameter(json):
    # type: (str) -> Optional[List[Dict[str, Any]]]

    # FIXME: A hack to support old mobile clients
    if json == '{}':
        return None

    data = ujson.loads(json)
    if not isinstance(data, list):
        raise ValueError("argument is not a list")

    def convert_term(elem):
        # type: (Union[Dict, List]) -> Dict[str, Any]

        # We have to support a legacy tuple format.
        if isinstance(elem, list):
            if (len(elem) != 2 or
                    any(not isinstance(x, str) and not isinstance(x, Text)
                        for x in elem)):
                raise ValueError("element is not a string pair")
            return dict(operator=elem[0], operand=elem[1])

        if isinstance(elem, dict):
            validator = check_dict([
                ('operator', check_string),
                ('operand', check_string),
            ])

            error = validator('elem', elem)
            if error:
                raise JsonableError(error)

            # whitelist the fields we care about for now
            return dict(
                operator=elem['operator'],
                operand=elem['operand'],
                negated=elem.get('negated', False),
            )

        raise ValueError("element is not a dictionary")

    return list(map(convert_term, data))
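
# Example (illustrative): narrow_parameter('[{"operator": "stream", "operand": "Denmark"}]')
# returns [{'operator': 'stream', 'operand': 'Denmark', 'negated': False}]; the legacy
# form '[["stream", "Denmark"]]' yields the same operator/operand pair without 'negated'.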

def is_public_stream(stream_name, realm):
    # type: (Text, Realm) -> bool
    """
    Determine whether a stream is public, so that
    our caller can decide whether we can get
    historical messages for a narrowing search.

    Because of the way our search is currently structured,
    we may be passed an invalid stream here.  We return
    False in that situation, and subsequent code will do
    validation and raise the appropriate JsonableError.
    """
    stream = get_stream(stream_name, realm)
    if stream is None:
        return False
    return stream.is_public()

def ok_to_include_history(narrow, realm):
    # type: (Optional[Iterable[Dict[str, Any]]], Realm) -> bool

    # There are occasions where we need to find Message rows that
    # have no corresponding UserMessage row, because the user is
    # reading a public stream that might include messages that
    # were sent while the user was not subscribed, but which they are
    # allowed to see.  We have to be very careful about constructing
    # queries in those situations, so this function should return True
    # only if we are 100% sure that we're gonna add a clause to the
    # query that narrows to a particular public stream on the user's realm.
    # If we screw this up, then we can get into a nasty situation of
    # polluting our narrow results with messages from other realms.
    include_history = False
    if narrow is not None:
        for term in narrow:
            if term['operator'] == "stream" and not term.get('negated', False):
                if is_public_stream(term['operand'], realm):
                    include_history = True
        # Disable historical messages if the user is narrowing on anything
        # that's a property on the UserMessage table.  There cannot be
        # historical messages in these cases anyway.
        for term in narrow:
            if term['operator'] == "is":
                include_history = False

    return include_history
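
# Example (illustrative): a narrow of [{'operator': 'stream', 'operand': 'Denmark'}]
# on a public stream permits history, but adding {'operator': 'is', 'operand': 'starred'}
# disables it again, since "is" operators depend on per-user UserMessage flags that
# cannot exist for never-received messages.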

def get_stream_name_from_narrow(narrow):
    # type: (Iterable[Dict[str, Any]]) -> Optional[Text]
    for term in narrow:
        if term['operator'] == 'stream':
            return term['operand'].lower()
    return None

def exclude_muting_conditions(user_profile, narrow):
    # type: (UserProfile, Optional[Iterable[Dict[str, Any]]]) -> List[Selectable]
    conditions = []
    stream_name = get_stream_name_from_narrow(narrow)

    if stream_name is None:
        rows = Subscription.objects.filter(
            user_profile=user_profile,
            active=True,
            in_home_view=False,
            recipient__type=Recipient.STREAM
        ).values('recipient_id')
        muted_recipient_ids = [row['recipient_id'] for row in rows]
        condition = not_(column("recipient_id").in_(muted_recipient_ids))
        conditions.append(condition)

    muted_topics = ujson.loads(user_profile.muted_topics)
    if muted_topics:
        if stream_name is not None:
            muted_topics = [m for m in muted_topics if m[0].lower() == stream_name]
            if not muted_topics:
                return conditions

        muted_streams = bulk_get_streams(user_profile.realm,
                                         [muted[0] for muted in muted_topics])
        muted_recipients = bulk_get_recipients(Recipient.STREAM,
                                               [stream.id for stream in six.itervalues(muted_streams)])
        recipient_map = dict((s.name.lower(), muted_recipients[s.id].id)
                             for s in six.itervalues(muted_streams))

        muted_topics = [m for m in muted_topics if m[0].lower() in recipient_map]

        if muted_topics:
            def mute_cond(muted):
                # type: (Tuple[str, str]) -> Selectable
                stream_cond = column("recipient_id") == recipient_map[muted[0].lower()]
                topic_cond = func.upper(column("subject")) == func.upper(muted[1])
                return and_(stream_cond, topic_cond)

            condition = not_(or_(*list(map(mute_cond, muted_topics))))
            return conditions + [condition]

    return conditions
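
# Example (illustrative): with user_profile.muted_topics of [["Denmark", "lunch"]],
# the returned conditions exclude rows whose recipient is the "Denmark" stream and
# whose subject matches "lunch" case-insensitively (plus, when not narrowed to a
# stream, any stream the user has removed from their home view).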

@has_request_variables
def get_old_messages_backend(request, user_profile,
                             anchor = REQ(converter=int),
                             num_before = REQ(converter=to_non_negative_int),
                             num_after = REQ(converter=to_non_negative_int),
                             narrow = REQ('narrow', converter=narrow_parameter, default=None),
                             use_first_unread_anchor = REQ(default=False, converter=ujson.loads),
                             apply_markdown=REQ(default=True,
                                                converter=ujson.loads)):
    # type: (HttpRequest, UserProfile, int, int, int, Optional[List[Dict[str, Any]]], bool, bool) -> HttpResponse
    include_history = ok_to_include_history(narrow, user_profile.realm)

    if include_history and not use_first_unread_anchor:
        query = select([column("id").label("message_id")], None, table("zerver_message"))
        inner_msg_id_col = literal_column("zerver_message.id")
    elif narrow is None:
        query = select([column("message_id"), column("flags")],
                       column("user_profile_id") == literal(user_profile.id),
                       table("zerver_usermessage"))
        inner_msg_id_col = column("message_id")
    else:
        # TODO: Don't do this join if we're not doing a search
        query = select([column("message_id"), column("flags")],
                       column("user_profile_id") == literal(user_profile.id),
                       join(table("zerver_usermessage"), table("zerver_message"),
                            literal_column("zerver_usermessage.message_id") ==
                            literal_column("zerver_message.id")))
        inner_msg_id_col = column("message_id")

    num_extra_messages = 1
    is_search = False

    if narrow is not None:
        # Add some metadata to our logging data for narrows
        verbose_operators = []
        for term in narrow:
            if term['operator'] == "is":
                verbose_operators.append("is:" + term['operand'])
            else:
                verbose_operators.append(term['operator'])
        request._log_data['extra'] = "[%s]" % (",".join(verbose_operators),)

        # Build the query for the narrow
        num_extra_messages = 0
        builder = NarrowBuilder(user_profile, inner_msg_id_col)
        search_term = None # type: Optional[Dict[str, Any]]
        for term in narrow:
            if term['operator'] == 'search':
                if not is_search:
                    search_term = term
                    query = query.column(column("subject")).column(column("rendered_content"))
                    is_search = True
                else:
                    # Join the search operators if there are multiple of them
                    search_term['operand'] += ' ' + term['operand']
            else:
                query = builder.add_term(query, term)
        if is_search:
            query = builder.add_term(query, search_term)

    # We add 1 to the number of messages requested if no narrow was
    # specified to ensure that the resulting list always contains the
    # anchor message.  If a narrow was specified, the anchor message
    # might not match the narrow anyway.
    if num_after != 0:
        num_after += num_extra_messages
    else:
        num_before += num_extra_messages

    sa_conn = get_sqlalchemy_connection()
    if use_first_unread_anchor:
        condition = column("flags").op("&")(UserMessage.flags.read.mask) == 0

        # We exclude messages on muted topics when finding the first unread
        # message in this narrow
        muting_conditions = exclude_muting_conditions(user_profile, narrow)
        if muting_conditions:
            condition = and_(condition, *muting_conditions)

        first_unread_query = query.where(condition)
        first_unread_query = first_unread_query.order_by(inner_msg_id_col.asc()).limit(1)
        first_unread_result = list(sa_conn.execute(first_unread_query).fetchall())
        if len(first_unread_result) > 0:
            anchor = first_unread_result[0][0]
        else:
            anchor = LARGER_THAN_MAX_MESSAGE_ID

    before_query = None
    after_query = None
    if num_before != 0:
        before_anchor = anchor
        if num_after != 0:
            # Don't include the anchor in both the before query and the after query
            before_anchor = anchor - 1
        before_query = query.where(inner_msg_id_col <= before_anchor) \
                            .order_by(inner_msg_id_col.desc()).limit(num_before)
    if num_after != 0:
        after_query = query.where(inner_msg_id_col >= anchor) \
                           .order_by(inner_msg_id_col.asc()).limit(num_after)

    if anchor == LARGER_THAN_MAX_MESSAGE_ID:
        # There's no need for an after_query if we're targeting just the target message.
        after_query = None

    if before_query is not None:
        if after_query is not None:
            query = union_all(before_query.self_group(), after_query.self_group())
        else:
            query = before_query
    elif after_query is not None:
        query = after_query
    else:
        # This can happen when a narrow is specified.
        query = query.where(inner_msg_id_col == anchor)

    main_query = alias(query)
    query = select(main_query.c, None, main_query).order_by(column("message_id").asc())
    # This is a hack to tag the query we use for testing
    query = query.prefix_with("/* get_old_messages */")
    query_result = list(sa_conn.execute(query).fetchall())

    # The following is a little messy, but ensures that the code paths
    # are similar regardless of the value of include_history.  The
    # 'user_messages' dictionary maps each message to the user's
    # UserMessage object for that message, which we will attach to the
    # rendered message dict before returning it.  We attempt to
    # bulk-fetch rendered message dicts from remote cache using the
    # 'messages' list.
    search_fields = dict() # type: Dict[int, Dict[str, Text]]
    message_ids = [] # type: List[int]
    user_message_flags = {} # type: Dict[int, List[str]]
    if include_history:
        message_ids = [row[0] for row in query_result]

        # TODO: This could be done with an outer join instead of two queries
        user_message_flags = dict((user_message.message_id, user_message.flags_list()) for user_message in
                                  UserMessage.objects.filter(user_profile=user_profile,
                                                             message__id__in=message_ids))
        for row in query_result:
            message_id = row[0]
            if user_message_flags.get(message_id) is None:
                user_message_flags[message_id] = ["read", "historical"]
            if is_search:
                (_, subject, rendered_content, content_matches, subject_matches) = row
                search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                              content_matches, subject_matches)
    else:
        for row in query_result:
            message_id = row[0]
            flags = row[1]
            user_message_flags[message_id] = parse_usermessage_flags(flags)

            message_ids.append(message_id)

            if is_search:
                (_, _, subject, rendered_content, content_matches, subject_matches) = row
                search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                              content_matches, subject_matches)

    cache_transformer = lambda row: MessageDict.build_dict_from_raw_db_row(row, apply_markdown)
    id_fetcher = lambda row: row['id']

    message_dicts = generic_bulk_cached_fetch(lambda message_id: to_dict_cache_key_id(message_id, apply_markdown),
                                              Message.get_raw_db_rows,
                                              message_ids,
                                              id_fetcher=id_fetcher,
                                              cache_transformer=cache_transformer,
                                              extractor=extract_message_dict,
                                              setter=stringify_message_dict)

    message_list = []
    for message_id in message_ids:
        msg_dict = message_dicts[message_id]
        msg_dict.update({"flags": user_message_flags[message_id]})
        msg_dict.update(search_fields.get(message_id, {}))
        message_list.append(msg_dict)

    statsd.incr('loaded_old_messages', len(message_list))
    ret = {'messages': message_list,
           "result": "success",
           "msg": ""}
    return json_success(ret)
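
# Example (illustrative): a request with anchor=42, num_before=10, num_after=10 and
# narrow='[{"operator": "stream", "operand": "Denmark"}]' fetches up to 10 matching
# messages before the anchor and up to 10 at or after it, each returned with the
# user's flags and any search highlighting attached.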

@has_request_variables
def update_message_flags(request, user_profile,
                         messages=REQ(validator=check_list(check_int)),
                         operation=REQ('op'), flag=REQ(),
                         all=REQ(validator=check_bool, default=False),
                         stream_name=REQ(default=None),
                         topic_name=REQ(default=None)):
    # type: (HttpRequest, UserProfile, List[int], Text, Text, bool, Optional[Text], Optional[Text]) -> HttpResponse
    if all:
        target_count_str = "all"
    else:
        target_count_str = str(len(messages))
    log_data_str = "[%s %s/%s]" % (operation, flag, target_count_str)
    request._log_data["extra"] = log_data_str
    stream = None
    if stream_name is not None:
        stream = get_stream(stream_name, user_profile.realm)
        if not stream:
            raise JsonableError(_('No such stream \'%s\'') % (stream_name,))
        if topic_name:
            topic_exists = UserMessage.objects.filter(user_profile=user_profile,
                                                      message__recipient__type_id=stream.id,
                                                      message__recipient__type=Recipient.STREAM,
                                                      message__subject__iexact=topic_name).exists()
            if not topic_exists:
                raise JsonableError(_('No such topic \'%s\'') % (topic_name,))
    count = do_update_message_flags(user_profile, operation, flag, messages,
                                    all, stream, topic_name)

    # If we succeed, update log data str with the actual count for how
    # many messages were updated.
    if count != len(messages):
        log_data_str = "[%s %s/%s] actually %s" % (operation, flag, target_count_str, count)
        request._log_data["extra"] = log_data_str

    return json_success({'result': 'success',
                         'messages': messages,
                         'msg': ''})

def create_mirrored_message_users(request, user_profile, recipients):
    # type: (HttpRequest, UserProfile, Iterable[Text]) -> Tuple[bool, Optional[UserProfile]]
    if "sender" not in request.POST:
        return (False, None)

    sender_email = request.POST["sender"].strip().lower()
    referenced_users = set([sender_email])
    if request.POST['type'] == 'private':
        for email in recipients:
            referenced_users.add(email.lower())

    if request.client.name == "zephyr_mirror":
        user_check = same_realm_zephyr_user
        fullname_function = compute_mit_user_fullname
    elif request.client.name == "irc_mirror":
        user_check = same_realm_irc_user
        fullname_function = compute_irc_user_fullname
    elif request.client.name in ("jabber_mirror", "JabberMirror"):
        user_check = same_realm_jabber_user
        fullname_function = compute_jabber_user_fullname
    else:
        # Unrecognized mirroring client
        return (False, None)

    for email in referenced_users:
        # Check that all referenced users are in our realm:
        if not user_check(user_profile, email):
            return (False, None)

    # Create users for the referenced users, if needed.
    for email in referenced_users:
        create_mirror_user_if_needed(user_profile.realm, email, fullname_function)

    sender = get_user_profile_by_email(sender_email)
    return (True, sender)

def same_realm_zephyr_user(user_profile, email):
    # type: (UserProfile, Text) -> bool
    #
    # Are the sender and recipient both addresses in the same Zephyr
    # mirroring realm?  We have to handle this specially, inferring
    # the domain from the e-mail address, because the recipient may
    # not exist in Zulip and we may need to make a stub Zephyr
    # mirroring user on the fly.
    try:
        validators.validate_email(email)
    except ValidationError:
        return False

    domain = email_to_domain(email)

    # Assumes allow_subdomains=False for all RealmAlias's corresponding to
    # these realms.
    return user_profile.realm.is_zephyr_mirror_realm and \
        RealmAlias.objects.filter(realm=user_profile.realm, domain=domain).exists()

def same_realm_irc_user(user_profile, email):
    # type: (UserProfile, Text) -> bool
    # Check whether the target email address is an IRC user in the
    # same realm as user_profile, i.e. if the domain were example.com,
    # the IRC user would need to be username@irc.example.com
    try:
        validators.validate_email(email)
    except ValidationError:
        return False

    domain = email_to_domain(email).replace("irc.", "")

    # Assumes allow_subdomains=False for all RealmAlias's corresponding to
    # these realms.
    return RealmAlias.objects.filter(realm=user_profile.realm, domain=domain).exists()

def same_realm_jabber_user(user_profile, email):
    # type: (UserProfile, Text) -> bool
    try:
        validators.validate_email(email)
    except ValidationError:
        return False

    # If your Jabber users have a different email domain than the
    # Zulip users, this is where you would do any translation.
    domain = email_to_domain(email)

    # Assumes allow_subdomains=False for all RealmAlias's corresponding to
    # these realms.
    return RealmAlias.objects.filter(realm=user_profile.realm, domain=domain).exists()

# We do not @require_login for send_message_backend, since it is used
# both from the API and the web service.  Code calling
# send_message_backend should either check the API key or check that
# the user is logged in.
@has_request_variables
def send_message_backend(request, user_profile,
                         message_type_name = REQ('type'),
                         message_to = REQ('to', converter=extract_recipients, default=[]),
                         forged = REQ(default=False),
                         subject_name = REQ('subject', lambda x: x.strip(), None),
                         message_content = REQ('content'),
                         realm_str = REQ('realm_str', default=None),
                         local_id = REQ(default=None),
                         queue_id = REQ(default=None)):
    # type: (HttpRequest, UserProfile, Text, List[Text], bool, Optional[Text], Text, Optional[Text], Optional[Text], Optional[Text]) -> HttpResponse
    client = request.client
    is_super_user = request.user.is_api_super_user
    if forged and not is_super_user:
        return json_error(_("User not authorized for this query"))

    realm = None
    if realm_str and realm_str != user_profile.realm.string_id:
        if not is_super_user:
            # The email gateway bot needs to be able to send messages in
            # any realm.
            return json_error(_("User not authorized for this query"))
        realm = get_realm(realm_str)
        if not realm:
            return json_error(_("Unknown realm %s") % (realm_str,))

    if client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]:
        # Here's how security works for mirroring:
        #
        # For private messages, the message must be (1) both sent and
        # received exclusively by users in your realm, and (2)
        # received by the forwarding user.
        #
        # For stream messages, the message must be (1) being forwarded
        # by an API superuser for your realm and (2) being sent to a
        # mirrored stream (any stream for the Zephyr and Jabber
        # mirrors, but only streams with names starting with a "#" for
        # IRC mirrors)
        #
        # The security checks are split between the below code
        # (especially create_mirrored_message_users which checks the
        # same-realm constraint) and recipient_for_emails (which
        # checks that PMs are received by the forwarding user)
        if "sender" not in request.POST:
            return json_error(_("Missing sender"))
        if message_type_name != "private" and not is_super_user:
            return json_error(_("User not authorized for this query"))
        (valid_input, mirror_sender) = \
            create_mirrored_message_users(request, user_profile, message_to)
        if not valid_input:
            return json_error(_("Invalid mirrored message"))
        if client.name == "zephyr_mirror" and not user_profile.realm.is_zephyr_mirror_realm:
            return json_error(_("Invalid mirrored realm"))
        if (client.name == "irc_mirror" and message_type_name != "private" and
                not message_to[0].startswith("#")):
            return json_error(_("IRC stream names must start with #"))
        sender = mirror_sender
    else:
        sender = user_profile

    ret = check_send_message(sender, client, message_type_name, message_to,
                             subject_name, message_content, forged=forged,
                             forged_timestamp = request.POST.get('time'),
                             forwarder_user_profile=user_profile, realm=realm,
                             local_id=local_id, sender_queue_id=queue_id)
    return json_success({"id": ret})

def json_update_message(request, user_profile, message_id):
    # type: (HttpRequest, UserProfile, int) -> HttpResponse
    return update_message_backend(request, user_profile)

def fill_edit_history_entries(message_history, message):
    # type: (List[Dict[str, Any]], Message) -> None
    """This fills out the message edit history entries from the database,
    which are designed to have the minimum data possible, to instead
    have the current topic + content as of that time, plus data on
    whatever changed.  This makes it much simpler to do future
    processing.

    Note that this mutates what is passed to it, which is sort of a bad pattern.
    """
    prev_content = message.content
    prev_rendered_content = message.rendered_content
    prev_topic = message.subject
    assert(datetime_to_timestamp(message.last_edit_time) == message_history[0]['timestamp'])

    for entry in message_history:
        entry['topic'] = prev_topic
        if 'prev_subject' in entry:
            # We replace use of 'subject' with 'topic' for downstream simplicity
            prev_topic = entry['prev_subject']
            entry['prev_topic'] = prev_topic
            del entry['prev_subject']

        entry['content'] = prev_content
        entry['rendered_content'] = prev_rendered_content
        if 'prev_content' in entry:
            del entry['prev_rendered_content_version']
            prev_content = entry['prev_content']
            prev_rendered_content = entry['prev_rendered_content']
            entry['content_html_diff'] = highlight_html_differences(
                prev_rendered_content,
                entry['rendered_content'])

    message_history.append(dict(
        topic = prev_topic,
        content = prev_content,
        rendered_content = prev_rendered_content,
        timestamp = datetime_to_timestamp(message.pub_date),
        user_id = message.sender_id,
    ))
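
# Example (illustrative): after filling, each entry carries 'topic', 'content' and
# 'rendered_content' as of that edit, plus 'prev_topic' / 'prev_content' /
# 'content_html_diff' when that particular edit changed them; the final appended
# entry represents the message as originally sent.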

@has_request_variables
def get_message_edit_history(request, user_profile,
                             message_id=REQ(converter=to_non_negative_int)):
    # type: (HttpRequest, UserProfile, int) -> HttpResponse
    message, ignored_user_message = access_message(user_profile, message_id)

    # Extract the message edit history from the message
    message_edit_history = ujson.loads(message.edit_history)

    # Fill in all the extra data that will make it usable
    fill_edit_history_entries(message_edit_history, message)
    return json_success({"message_history": list(reversed(message_edit_history))})

@has_request_variables
def update_message_backend(request, user_profile,
                           message_id=REQ(converter=to_non_negative_int),
                           subject=REQ(default=None),
                           propagate_mode=REQ(default="change_one"),
                           content=REQ(default=None)):
    # type: (HttpRequest, UserProfile, int, Optional[Text], Optional[str], Optional[Text]) -> HttpResponse
    if not user_profile.realm.allow_message_editing:
        return json_error(_("Your organization has turned off message editing."))

    try:
        message = Message.objects.select_related().get(id=message_id)
    except Message.DoesNotExist:
        raise JsonableError(_("Unknown message id"))

    # You only have permission to edit a message if:
    # 1. You sent it, OR:
    # 2. This is a topic-only edit for a (no topic) message, OR:
    # 3. This is a topic-only edit and you are an admin.
    if message.sender == user_profile:
        pass
    elif (content is None) and ((message.topic_name() == "(no topic)") or
                                user_profile.is_realm_admin):
        pass
    else:
        raise JsonableError(_("You don't have permission to edit this message"))

    # If there is a change to the content, check that it hasn't been too long
    # Allow an extra 20 seconds since we potentially allow editing 15 seconds
    # past the limit, and in case there are network issues, etc. The 15 comes
    # from (min_seconds_to_edit + seconds_left_buffer) in message_edit.js; if
    # you change this value also change those two parameters in message_edit.js.
    edit_limit_buffer = 20
    if content is not None and user_profile.realm.message_content_edit_limit_seconds > 0:
        deadline_seconds = user_profile.realm.message_content_edit_limit_seconds + edit_limit_buffer
        if (now() - message.pub_date) > datetime.timedelta(seconds=deadline_seconds):
            raise JsonableError(_("The time limit for editing this message has passed"))

    if subject is None and content is None:
        return json_error(_("Nothing to change"))
    if subject is not None:
        subject = subject.strip()
        if subject == "":
            raise JsonableError(_("Topic can't be empty"))
    rendered_content = None
    links_for_embed = set() # type: Set[Text]
    if content is not None:
        content = content.strip()
        if content == "":
            content = "(deleted)"
        content = truncate_body(content)

        # We exclude UserMessage.flags.historical rows since those
        # users did not receive the message originally, and thus
        # probably are not relevant for reprocessed alert_words,
        # mentions and similar rendering features.  This may be a
        # decision we change in the future.
        ums = UserMessage.objects.filter(
            message=message.id,
            flags=~UserMessage.flags.historical)

        message_users = UserProfile.objects.select_related().filter(
            id__in={um.user_profile_id for um in ums})

        # We render the message using the current user's realm; since
        # the cross-realm bots never edit messages, this should be
        # always correct.
        # Note: If rendering fails, the called code will raise a JsonableError.
        rendered_content = render_incoming_message(message,
                                                   content,
                                                   message_users,
                                                   user_profile.realm)
        links_for_embed |= message.links_for_preview

    number_changed = do_update_message(user_profile, message, subject,
                                       propagate_mode, content, rendered_content)
    # Include the number of messages changed in the logs
    request._log_data['extra'] = "[%s]" % (number_changed,)
    if links_for_embed and getattr(settings, 'INLINE_URL_EMBED_PREVIEW', None):
        event_data = {
            'message_id': message.id,
            'message_content': message.content,
            'urls': links_for_embed}
        queue_json_publish('embed_links', event_data, lambda x: None)
    return json_success()

@has_request_variables
def json_fetch_raw_message(request, user_profile,
                           message_id=REQ(converter=to_non_negative_int)):
    # type: (HttpRequest, UserProfile, int) -> HttpResponse
    (message, user_message) = access_message(user_profile, message_id)
    return json_success({"raw_content": message.content})

@has_request_variables
def render_message_backend(request, user_profile, content=REQ()):
    # type: (HttpRequest, UserProfile, Text) -> HttpResponse
    message = Message()
    message.sender = user_profile
    message.content = content
    message.sending_client = request.client

    rendered_content = render_markdown(message, content, realm=user_profile.realm)
    return json_success({"rendered": rendered_content})

@authenticated_json_post_view
def json_messages_in_narrow(request, user_profile):
    # type: (HttpRequest, UserProfile) -> HttpResponse
    return messages_in_narrow_backend(request, user_profile)

@has_request_variables
def messages_in_narrow_backend(request, user_profile,
                               msg_ids = REQ(validator=check_list(check_int)),
                               narrow = REQ(converter=narrow_parameter)):
    # type: (HttpRequest, UserProfile, List[int], List[Dict[str, Any]]) -> HttpResponse

    # Note that this function will only work on messages the user
    # actually received

    # TODO: We assume that the narrow is a search.  For now this works because
    # the browser only ever calls this function for searches, since it can't
    # apply that narrow operator itself.

    query = select([column("message_id"), column("subject"), column("rendered_content")],
                   and_(column("user_profile_id") == literal(user_profile.id),
                        column("message_id").in_(msg_ids)),
                   join(table("zerver_usermessage"), table("zerver_message"),
                        literal_column("zerver_usermessage.message_id") ==
                        literal_column("zerver_message.id")))

    builder = NarrowBuilder(user_profile, column("message_id"))
    for term in narrow:
        query = builder.add_term(query, term)

    sa_conn = get_sqlalchemy_connection()
    query_result = list(sa_conn.execute(query).fetchall())

    search_fields = dict()
    for row in query_result:
        (message_id, subject, rendered_content, content_matches, subject_matches) = row
        search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                      content_matches, subject_matches)

    return json_success({"messages": search_fields})