zulip/static/js/message_fetch.js


import * as channel from "./channel";
import {Filter} from "./filter";
import * as huddle_data from "./huddle_data";
import * as message_scroll from "./message_scroll";
import * as message_store from "./message_store";
import * as message_util from "./message_util";
import * as people from "./people";
import * as pm_list from "./pm_list";
import * as recent_topics from "./recent_topics";
import * as server_events from "./server_events";
import * as stream_data from "./stream_data";
import * as stream_list from "./stream_list";
import * as ui_report from "./ui_report";
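
// The two *_time constants below are in milliseconds; all the other
// constants are message counts.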
const consts = {
backfill_idle_time: 10 * 1000,
error_retry_time: 5000,
backfill_batch_size: 1000,
narrow_before: 50,
narrow_after: 50,
num_before_home_anchor: 200,
num_after_home_anchor: 200,
recent_topics_initial_fetch_size: 400,
backward_batch_size: 100,
forward_batch_size: 100,
catch_up_batch_size: 1000,
};
function process_result(data, opts) {
let messages = data.messages;
if (!$("#connection-error").hasClass("get-events-error")) {
ui_report.hide_error($("#connection-error"));
}
if (
messages.length === 0 &&
current_msg_list === message_list.narrowed &&
message_list.narrowed.empty()
) {
// Even after trying to load more messages, we have no
// messages to display in this narrow.
narrow.show_empty_narrow_message();
}
messages = messages.map((message) => {
message_store.set_message_booleans(message);
return message_store.add_message_metadata(message);
});
// In case any of the newly fetched messages are new, add them to
// our unread data structures. It's important that this run even
// when fetching in a narrow, since we might return unread
// messages that aren't in the home view data set (e.g. on a muted
// stream).
message_util.do_unread_count_updates(messages);
// If we're loading more messages into the home view, save them to
// the message_list.all as well, as the home_msg_list is reconstructed
// from message_list.all.
if (opts.msg_list === home_msg_list) {
message_util.add_old_messages(messages, message_list.all);
}
if (messages.length !== 0) {
message_util.add_old_messages(messages, opts.msg_list);
}
huddle_data.process_loaded_messages(messages);
stream_list.update_streams_sidebar();
pm_list.update_private_messages();
recent_topics.process_messages(messages);
stream_list.maybe_scroll_narrow_into_view();
if (opts.cont !== undefined) {
opts.cont(data, opts);
}
}
function get_messages_success(data, opts) {
const update_loading_indicator = opts.msg_list === current_msg_list;
if (opts.num_before > 0) {
opts.msg_list.data.fetch_status.finish_older_batch({
update_loading_indicator,
found_oldest: data.found_oldest,
history_limited: data.history_limited,
});
if (opts.msg_list === home_msg_list) {
// When we update home_msg_list, we need to also update
// the fetch_status data structure for message_list.all,
// which is never rendered (and just used for
// prepopulating narrowed views).
message_list.all.data.fetch_status.finish_older_batch({
update_loading_indicator: false,
found_oldest: data.found_oldest,
history_limited: data.history_limited,
});
}
message_scroll.update_top_of_narrow_notices(opts.msg_list);
}
if (opts.num_after > 0) {
opts.fetch_again = opts.msg_list.data.fetch_status.finish_newer_batch(data.messages, {
update_loading_indicator,
found_newest: data.found_newest,
});
if (opts.msg_list === home_msg_list) {
// When we update home_msg_list, we need to also update
// the fetch_status data structure for message_list.all,
// which is never rendered (and just used for
// prepopulating narrowed views).
opts.fetch_again = message_list.all.data.fetch_status.finish_newer_batch(
data.messages,
{
update_loading_indicator: false,
found_newest: data.found_newest,
},
);
}
}
if (opts.msg_list.narrowed && opts.msg_list !== current_msg_list) {
// We unnarrowed before receiving new messages so
// don't bother processing the newly arrived messages.
return;
}
if (!data) {
// The server occasionally returns no data during a
// restart. Ignore those responses and try again.
setTimeout(() => {
load_messages(opts);
}, 0);
return;
}
process_result(data, opts);
}
// This function modifies the data.narrow filters to use user IDs
// instead of email strings where the server supports that. We don't
// convert the email strings to user IDs directly in the Filter code,
// because doing so breaks various modules that expect email strings.
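//
// For example (with a hypothetical user ID of 11), a narrow of
//     [{"operator": "pm-with", "operand": "alice@example.com"}]
// is rewritten here to
//     [{"operator": "pm-with", "operand": [11]}]
// before being re-serialized into data.narrow.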
function handle_operators_supporting_id_based_api(data) {
const operators_supporting_ids = new Set(["pm-with"]);
const operators_supporting_id = new Set(["sender", "group-pm-with", "stream"]);
if (data.narrow === undefined) {
return data;
}
data.narrow = JSON.parse(data.narrow);
data.narrow = data.narrow.map((filter) => {
if (operators_supporting_ids.has(filter.operator)) {
filter.operand = people.emails_strings_to_user_ids_array(filter.operand);
}
if (operators_supporting_id.has(filter.operator)) {
if (filter.operator === "stream") {
const stream_id = stream_data.get_stream_id(filter.operand);
if (stream_id !== undefined) {
filter.operand = stream_id;
}
return filter;
}
// The other operands supporting object IDs all work with user objects.
const person = people.get_by_email(filter.operand);
if (person !== undefined) {
filter.operand = person.user_id;
}
}
return filter;
});
data.narrow = JSON.stringify(data.narrow);
return data;
}
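// Note that `opts.anchor` may be a numeric message ID or one of the
// special string values used elsewhere in this file ("first_unread",
// "newest").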
export function load_messages(opts) {
if (typeof opts.anchor === "number") {
// Messages that have been locally echoed have floating-point
// temporary IDs, which are intended to be a completely
// client-side detail. We need to round these to the nearest
// integer before sending a request to the server.
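// For example, an echoed anchor of 1234.01 becomes "1234" here;
// note that Number.prototype.toFixed returns a string, which the
// /json/messages endpoint accepts as an anchor.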
opts.anchor = opts.anchor.toFixed();
}
let data = {anchor: opts.anchor, num_before: opts.num_before, num_after: opts.num_after};
// This block is a hack; structurally, we want to set
// data.narrow = opts.msg_list.data.filter.public_operators()
//
// But support for the message_list.all sharing of data with
// home_msg_list and the (hacky) page_params.narrow feature
// requires a somewhat ugly bundle of conditionals.
if (opts.msg_list === home_msg_list) {
if (page_params.narrow_stream !== undefined) {
data.narrow = JSON.stringify(page_params.narrow);
}
// Otherwise, we don't pass narrow for home_msg_list; this is
// required because it shares its data with message_list.all, and
// so we need the server to send us message history from muted
// streams and topics even though home_msg_list's in:home
// operators will filter those.
} else {
let operators = opts.msg_list.data.filter.public_operators();
if (page_params.narrow !== undefined) {
operators = operators.concat(page_params.narrow);
}
data.narrow = JSON.stringify(operators);
}
let update_loading_indicator = opts.msg_list === current_msg_list;
if (opts.num_before > 0) {
opts.msg_list.data.fetch_status.start_older_batch({
update_loading_indicator,
});
if (opts.msg_list === home_msg_list) {
message_list.all.data.fetch_status.start_older_batch({
update_loading_indicator,
});
}
}
if (opts.num_after > 0) {
// We hide the bottom loading indicator when we're fetching both top and bottom messages.
update_loading_indicator = update_loading_indicator && opts.num_before === 0;
opts.msg_list.data.fetch_status.start_newer_batch({
update_loading_indicator,
});
if (opts.msg_list === home_msg_list) {
message_list.all.data.fetch_status.start_newer_batch({
update_loading_indicator,
});
}
}
data.client_gravatar = true;
data = handle_operators_supporting_id_based_api(data);
channel.get({
url: "/json/messages",
data,
idempotent: true,
success(data) {
get_messages_success(data, opts);
},
error(xhr) {
if (opts.msg_list.narrowed && opts.msg_list !== current_msg_list) {
// We unnarrowed before getting an error so don't
// bother trying again or doing further processing.
return;
}
if (xhr.status === 400) {
// Bad request: We probably specified a narrow operator
// for a nonexistent stream or something. We shouldn't
// retry or display a connection error.
//
// FIXME: Warn the user when this has happened?
message_scroll.hide_indicators();
const data = {
messages: [],
};
process_result(data, opts);
return;
}
// We might want to be more clever here
$("#connection-error").addClass("show");
setTimeout(() => {
load_messages(opts);
}, consts.error_retry_time);
},
});
}
export function load_messages_for_narrow(opts) {
const msg_list = message_list.narrowed;
load_messages({
anchor: opts.anchor,
num_before: consts.narrow_before,
num_after: consts.narrow_after,
msg_list,
cont: opts.cont,
});
}
export function get_backfill_anchor(msg_list) {
if (msg_list === home_msg_list) {
msg_list = message_list.all;
}
const oldest_msg = msg_list.first();
if (oldest_msg) {
return oldest_msg.id;
}
// msg_list is empty; this should be impossible, so raise a
// fatal error.
throw new Error("There are no messages available to backfill.");
}
export function get_frontfill_anchor(msg_list) {
if (msg_list === home_msg_list) {
msg_list = message_list.all;
}
const last_msg = msg_list.last();
if (last_msg) {
return last_msg.id;
}
// It should be impossible to reach here: we only call this after
// checking `msg_list.fetch_status.can_load_newer_messages`, and the
// user cannot be scrolling down on an empty message list to fetch
// more data. If we do get here, our data is inconsistent, so raise
// a fatal error.
throw new Error("There are no messages available to frontfill.");
}
export function maybe_load_older_messages(opts) {
// This function gets called when you scroll to the top
// of your window, and you want to get messages older
// than what the browser originally fetched.
const msg_list = opts.msg_list;
if (!msg_list.data.fetch_status.can_load_older_messages()) {
// We may already be loading old messages or already
// got the oldest one.
return;
}
do_backfill({
msg_list,
num_before: consts.backward_batch_size,
});
}
export function do_backfill(opts) {
const msg_list = opts.msg_list;
const anchor = get_backfill_anchor(msg_list);
load_messages({
anchor,
num_before: opts.num_before,
num_after: 0,
msg_list,
cont() {
if (opts.cont) {
opts.cont();
}
},
});
}
export function maybe_load_newer_messages(opts) {
// This function gets called when you scroll to the bottom
// of your window, and you want to get messages newer
// than what the browser originally fetched.
const msg_list = opts.msg_list;
if (!msg_list.data.fetch_status.can_load_newer_messages()) {
// We may already be loading new messages or already
// got the newest one.
return;
}
const anchor = get_frontfill_anchor(msg_list);
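// fetch_again is set by fetch_status when the batch we just
// received was already stale (a new message arrived while the
// fetch was in flight); in that case we re-fetch once from the
// current bottom. fetch_status resets its expected_max_message_id,
// which keeps this from looping indefinitely.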
function load_more(data, args) {
if (args.fetch_again && args.msg_list === current_msg_list) {
maybe_load_newer_messages({msg_list: current_msg_list});
}
}
load_messages({
anchor,
num_before: 0,
num_after: consts.forward_batch_size,
msg_list,
cont: load_more,
});
}
export function start_backfilling_messages() {
// backfill more messages after the user is idle
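// (This uses the jquery-idle plugin: `onIdle` fires once the user
// has gone `idle` milliseconds without interacting with the page.)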
$(document).idle({
idle: consts.backfill_idle_time,
onIdle() {
do_backfill({
num_before: consts.backfill_batch_size,
msg_list: home_msg_list,
});
},
});
}
export function initialize() {
// get the initial message list
function load_more(data) {
// If we haven't selected a message in the home view yet, and
// the home view isn't empty, we select the anchor message here.
if (home_msg_list.selected_id() === -1 && !home_msg_list.empty()) {
// We fall back to the closest selected id, as the user
// may have removed a stream from the home view while we
// were loading data.
home_msg_list.select_id(data.anchor, {
then_scroll: true,
use_closest: true,
target_scroll_offset: page_params.initial_offset,
});
}
if (data.found_newest) {
server_events.home_view_loaded();
start_backfilling_messages();
return;
}
// If we fall through here, we need to keep fetching more data, and
// we'll call back to the function we're in.
const messages = data.messages;
const latest_id = messages[messages.length - 1].id;
load_messages({
anchor: latest_id,
num_before: 0,
num_after: consts.catch_up_batch_size,
msg_list: home_msg_list,
cont: load_more,
});
}
let anchor;
if (page_params.initial_pointer) {
// If we're doing a server-initiated reload, similar to a
// near: narrow query, we want to select a specific message.
anchor = page_params.initial_pointer;
} else {
// Otherwise, we should just use the first unread message in
// the user's unmuted history as our anchor.
anchor = "first_unread";
}
load_messages({
anchor,
num_before: consts.num_before_home_anchor,
num_after: consts.num_after_home_anchor,
msg_list: home_msg_list,
cont: load_more,
});
// In addition to the algorithm above, which is designed to ensure
// that we fetch all message history eventually starting with the
// first unread message, we also need to ensure that the Recent
// Topics page contains the very most recent threads on page load.
//
// Long term, we'll want to replace this with something that's
// more performant (i.e. avoids this unnecessary extra fetch the
// results of which are basically discarded) and better represents
// more than a few hundred messages' history, but this strategy
// allows "Recent Topics" to always show current data (with gaps)
// on page load; the data will be complete once the algorithm
// above catches up to the present.
//
// (Users will see a weird artifact where Recent Topics has a gap
// between, e.g., 6 days ago and 37 days ago while the catch-up
// process runs, so this strategy still results in problematic
// visual artifacts shortly after page load; just more forgivable
// ones.)
//
// This MessageList is defined similarly to home_msg_list,
// without a `table_name` attached.
const recent_topics_message_list = new message_list.MessageList({
filter: new Filter([{operator: "in", operand: "home"}]),
excludes_muted_topics: true,
});
load_messages({
anchor: "newest",
num_before: consts.recent_topics_initial_fetch_size,
num_after: 0,
msg_list: recent_topics_message_list,
});
}