unread_ops: Provide more responsive feedback flagging msgs read/unread.

Processing 1k messages takes roughly 1s on a live server like
chat.zulip.org, which is about as long as a user will wait before
expecting some feedback on the request. Once we show the
in-progress banner, it makes sense to do bigger batches to speed
up the process.
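
In outline, the flow this sets up is: the first request asks the
server to flag only a small initial batch, so the user gets feedback
within about a second; if messages remain, each follow-up request
re-anchors at the last message the server processed and continues
while the in-progress banner is shown. A rough, runnable sketch of
that loop follows; send_flag_update, updated_count, and has_more are
stand-ins invented for the sketch, while last_processed_id and the
two batch-size constants come from the diff below.

const INITIAL_BATCH_SIZE = 1000;
const FOLLOWUP_BATCH_SIZE = 1000;

// Stand-in for the real channel request; it pretends the server
// flagged up to `num_after` messages per call so the sketch runs.
let remaining_unread = 2500;
function send_flag_update({anchor, num_after}, on_success) {
    const updated = Math.min(num_after, remaining_unread);
    remaining_unread -= updated;
    on_success({
        updated_count: updated,
        has_more: remaining_unread > 0,
        last_processed_id: anchor + updated, // placeholder id arithmetic
    });
}

function mark_batch({anchor, messages_updated_till_now, num_after}) {
    send_flag_update({anchor, num_after}, (data) => {
        messages_updated_till_now += data.updated_count;
        if (data.has_more) {
            // Keep the in-progress banner up and continue from where the
            // server stopped, re-anchored at the last processed message.
            mark_batch({
                anchor: data.last_processed_id,
                messages_updated_till_now,
                num_after: FOLLOWUP_BATCH_SIZE,
            });
        } else {
            console.log(`done; flagged ${messages_updated_till_now} messages`);
        }
    });
}

// The first call uses the small initial batch, so the user sees some
// feedback after roughly one second even with many unreads. The
// numeric anchor stands in for the real "oldest" anchor.
mark_batch({anchor: 0, messages_updated_till_now: 0, num_after: INITIAL_BATCH_SIZE});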
Aman Agrawal 2022-11-17 18:20:17 +00:00 committed by Tim Abbott
parent 59633e36cb
commit f0d87fcf6e
1 changed file with 16 additions and 5 deletions


@@ -18,9 +18,16 @@ import * as ui_report from "./ui_report";
 import * as unread from "./unread";
 import * as unread_ui from "./unread_ui";
 
-const NUM_OF_MESSAGES_UPDATED_PER_BATCH = 5000;
 let loading_indicator_displayed = false;
 
+// We might want to use a slightly smaller batch for the first
+// request, because empirically, the first request can be
+// significantly slower, likely due to the database warming up its
+// cache with your UserMessage rows. We don't do that, just because
+// the progress indicator experience of 1000, 3000, etc. feels weird.
+const INITIAL_BATCH_SIZE = 1000;
+const FOLLOWUP_BATCH_SIZE = 1000;
+
 export function mark_all_as_read(args = {}) {
     args = {
         // We use an anchor of "oldest", not "first_unread", because
@@ -29,6 +36,7 @@ export function mark_all_as_read(args = {}) {
         // unread not being processed.
         anchor: "oldest",
         messages_read_till_now: 0,
+        num_after: INITIAL_BATCH_SIZE,
         ...args,
     };
     const request = {
@@ -39,7 +47,7 @@ export function mark_all_as_read(args = {}) {
         // unconditionally false.
         include_anchor: false,
         num_before: 0,
-        num_after: NUM_OF_MESSAGES_UPDATED_PER_BATCH,
+        num_after: args.num_after,
         op: "add",
         flag: "read",
         // Since there's a database index on is:unread, it's a fast
@@ -76,6 +84,7 @@ export function mark_all_as_read(args = {}) {
                 mark_all_as_read({
                     anchor: data.last_processed_id,
                     messages_read_till_now,
+                    num_after: FOLLOWUP_BATCH_SIZE,
                 });
             } else {
                 if (loading_indicator_displayed) {
@@ -133,8 +142,9 @@ function process_newly_read_message(message, options) {
 
 export function mark_as_unread_from_here(
     message_id,
-    include_message = true,
+    include_anchor = true,
     messages_marked_unread_till_now = 0,
+    num_after = INITIAL_BATCH_SIZE - 1,
     narrow,
 ) {
     if (narrow === undefined) {
@@ -143,9 +153,9 @@ export function mark_as_unread_from_here(
     message_lists.current.prevent_reading();
     const opts = {
         anchor: message_id,
-        include_anchor: include_message,
+        include_anchor,
         num_before: 0,
-        num_after: NUM_OF_MESSAGES_UPDATED_PER_BATCH,
+        num_after,
         narrow,
         op: "remove",
         flag: "read",
@@ -180,6 +190,7 @@ export function mark_as_unread_from_here(
                 data.last_processed_id,
                 false,
                 messages_marked_unread_till_now,
+                FOLLOWUP_BATCH_SIZE,
                 narrow,
             );
         } else if (loading_indicator_displayed) {
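
One detail from the last hunks worth spelling out: mark_as_unread_from_here
defaults to include_anchor = true and num_after = INITIAL_BATCH_SIZE - 1,
so the first request covers the clicked message plus the 999 messages
after it, presumably so the initial batch totals 1000; the recursive call
then passes include_anchor = false and FOLLOWUP_BATCH_SIZE. A rough sketch
of the two request shapes (field values as in the diff; the surrounding
object literals are only for illustration):

const INITIAL_BATCH_SIZE = 1000;
const FOLLOWUP_BATCH_SIZE = 1000;

// First request: the anchor (clicked) message plus the 999 messages
// after it, i.e. 1000 messages in total.
const first_opts = {
    include_anchor: true,
    num_before: 0,
    num_after: INITIAL_BATCH_SIZE - 1,
};

// Follow-up requests: skip the already-processed anchor and take the
// next FOLLOWUP_BATCH_SIZE messages after it.
const followup_opts = {
    include_anchor: false,
    num_before: 0,
    num_after: FOLLOWUP_BATCH_SIZE,
};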