"use strict";

const assert = require("node:assert/strict");

const {mock_esm, zrequire} = require("./lib/namespace");
const {run_test} = require("./lib/test");

let next_id = 0;
const messages = new Map();

function make_stream_message({stream_id, topic, sender_id}) {
    next_id += 1;

    const message = {
        type: "stream",
        stream_id,
        id: next_id,
        topic,
        sender_id,
    };
    messages.set(message.id, message);

    return message;
}

mock_esm("../src/message_store", {
    get: (message_id) => messages.get(message_id),
});

const people = zrequire("people");
people.initialize_current_user(1);

const rs = zrequire("recent_senders");
zrequire("message_util.ts");

function test(label, f) {
    run_test(label, ({override}) => {
        messages.clear();
        next_id = 0;
        rs.clear_for_testing();
        f({override});
    });
}
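
// Note: the wrapper above gives each test a clean slate; it clears the fake
// message store, resets the id counter, and wipes recent_senders state
// before running the test body.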

// Design notes on the simplified recent_senders code:
//
// The rewrite reduces our dependency on message_list code (via
// message_util), and it makes moving streams/topics and deleting messages
// more performant.
//
// For every single message that was being updated or deleted, the previous
// code was basically re-computing lots of things, including having to
// iterate through every message in memory to find the messages matching
// your topic. Now everything basically happens in O(1) time.
//
// The only O(N) computation is that we now lazily re-compute the max
// message id every time you need it for typeahead logic, and then we cache
// it for subsequent use. The N here is the number of messages that the
// particular sender has sent to the particular stream/topic combination,
// so it should always be quite small, except for certain spammy bots.
//
// Once the max has been calculated, the common operation of adding a
// message doesn't invalidate our cached value. We only invalidate the
// cache on deletes.
//
// The main change from a data standpoint is that we just keep track of all
// message_ids for all senders. The storage overhead here should be
// negligible, and by keeping track of our own messages, we don't have to
// punt to other code for update/delete situations.
//
// There is similar code in recent_topics that could be improved in similar
// ways; that would allow us to eliminate message_util helpers like
// get_messages_in_topic, which filters every message in message_list.all
// by stream and topic.
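
// The IdTracker test below exercises exactly that caching behavior. As a
// rough illustration only (the real implementation lives in
// src/recent_senders and may differ), a tracker with a lazily cached max
// might look like:
//
//     class IdTrackerSketch {
//         ids = new Set();
//         cached_max = undefined; // undefined means "recompute on demand"
//
//         add(id) {
//             this.ids.add(id);
//             // Adding can only raise the max, so the cache stays valid.
//             if (this.cached_max !== undefined && id > this.cached_max) {
//                 this.cached_max = id;
//             }
//         }
//
//         remove(id) {
//             // Only deleting the current max invalidates the cache.
//             if (this.ids.delete(id) && id === this.cached_max) {
//                 this.cached_max = undefined;
//             }
//         }
//
//         max_id() {
//             if (this.cached_max === undefined) {
//                 // O(N) recompute, then cached for subsequent calls.
//                 this.cached_max = Math.max(0, ...this.ids);
//             }
//             return this.cached_max;
//         }
//     }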
test("IdTracker", () => {
|
|
|
|
const id_tracker = new rs.IdTracker();
|
|
|
|
|
|
|
|
function test_add(id, expected_max_id) {
|
|
|
|
id_tracker.add(id);
|
|
|
|
assert.equal(id_tracker.max_id(), expected_max_id);
|
|
|
|
}
|
|
|
|
|
|
|
|
test_add(5, 5);
|
|
|
|
test_add(7, 7);
|
|
|
|
test_add(3, 7);
|
|
|
|
test_add(10, 10);
|
|
|
|
test_add(12, 12);
|
|
|
|
test_add(11, 12);
|
|
|
|
|
|
|
|
function test_remove(id, expected_max_id) {
|
|
|
|
id_tracker.remove(id);
|
|
|
|
assert.equal(id_tracker.max_id(), expected_max_id);
|
|
|
|
}
|
|
|
|
|
|
|
|
test_remove(10, 12);
|
|
|
|
test_remove(999999, 12); // bogus id has no effect
|
|
|
|
test_remove(3, 12);
|
|
|
|
test_remove(12, 11);
|
|
|
|
|
|
|
|
test_add(3, 11);
|
|
|
|
test_add(7, 11);
|
|
|
|
test_add(13, 13);
|
|
|
|
|
|
|
|
test_remove(3, 13);
|
|
|
|
test_remove(13, 11);
|
|
|
|
});
|
|
|
|
|
|
|
|
test("noop process_topic_edit", () => {
|
|
|
|
// Just get line coverage on defensive code.
|
|
|
|
const bogus_ids = [333, 444];
|
|
|
|
rs.process_topic_edit({message_ids: bogus_ids});
|
|
|
|
});
|
|
|
|
|
|
|
|
test("update_topics_of_deleted_message_ids", () => {
|
|
|
|
// Just get line coverage on defensive code.
|
|
|
|
const stream_id = 555;
|
|
|
|
const topic = "whatever";
|
|
|
|
const sender_id = 999;
|
|
|
|
|
|
|
|
const message = make_stream_message({
|
|
|
|
stream_id,
|
|
|
|
topic,
|
|
|
|
sender_id,
|
|
|
|
});
|
|
|
|
rs.update_topics_of_deleted_message_ids([message.id]);
|
|
|
|
assert.deepEqual(rs.get_topic_recent_senders(stream_id, topic), []);
|
|
|
|
|
2022-11-22 11:45:29 +01:00
|
|
|
rs.process_stream_message(message);
|
    assert.deepEqual(rs.get_topic_recent_senders(stream_id, topic), [sender_id]);
});

test("process_stream_message", () => {
    const stream1 = 1;
    const stream2 = 2;
    const stream3 = 3;

    const topic1 = "topic-1";
    const topic2 = "topic-2";
    const topic3 = "topic-3";
    const topic4 = "topic-4";

    const sender1 = 1;
    const sender2 = 2;
    const sender3 = 3;

    const stream4 = 4;
    const stream5 = 5;

    // New stream
    const message1 = make_stream_message({
        stream_id: stream1,
        topic: topic1,
        sender_id: sender1,
    });

    const message2 = make_stream_message({
        stream_id: stream2,
        topic: topic1,
        sender_id: sender2,
    });

    rs.process_stream_message(message1);
    rs.process_stream_message(message2);

    // Users have posted in only one of the streams
    assert.equal(
        rs.compare_by_recency({user_id: sender1}, {user_id: sender2}, stream1, topic1) < 0,
        true,
    );
    assert.equal(
        rs.compare_by_recency({user_id: sender1}, {user_id: sender2}, stream2, topic1) > 0,
        true,
    );

    // Users haven't posted in this stream, return zero
    assert.equal(
        rs.compare_by_recency({user_id: sender1}, {user_id: sender2}, stream3, undefined) === 0,
        true,
    );
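
    // The comparator contract: a negative result means the first user is
    // the more recent sender in the given stream/topic (so they sort
    // first), positive means the second user is, and 0 means there is
    // nothing to distinguish them. A hedged usage sketch, where senders is
    // a hypothetical array of {user_id} objects:
    //
    //     senders.sort((a, b) => rs.compare_by_recency(a, b, stream1, topic1));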

    // New topic
    const message3 = make_stream_message({
        stream_id: stream1,
        topic: topic2,
        sender_id: sender3,
    });

    rs.process_stream_message(message3);
    assert.equal(
        rs.compare_by_recency({user_id: sender3}, {user_id: sender2}, stream1, topic2) < 0,
        true,
    );

    // New sender
    const message4 = make_stream_message({
        stream_id: stream1,
        topic: topic1,
        sender_id: sender2,
    });

    rs.process_stream_message(message4);
    assert.equal(
        rs.compare_by_recency({user_id: sender1}, {user_id: sender2}, stream1, topic1) > 0,
        true,
    );

    // More recent message
    const message5 = make_stream_message({
        stream_id: stream1,
        topic: topic1,
        sender_id: sender1,
    });

    rs.process_stream_message(message5);
    assert.equal(
        rs.compare_by_recency({user_id: sender1}, {user_id: sender2}, stream1, topic1) < 0,
        true,
    );

    // Messages sent by sender1 in stream1 > topic1
    assert.equal(rs.get_topic_message_ids_for_sender(stream1, topic1, sender1).size, 2);
    // Messages sent by sender1 in stream1 > topic2
    assert.equal(rs.get_topic_message_ids_for_sender(stream1, topic2, sender1).size, 0);

    // Same stream, but different topics
    const message6 = make_stream_message({
        stream_id: stream3,
        topic: topic1,
        sender_id: sender1,
    });
    const message7 = make_stream_message({
        stream_id: stream3,
        topic: topic2,
        sender_id: sender2,
    });
    const message8 = make_stream_message({
        stream_id: stream3,
        topic: topic3,
        sender_id: sender3,
    });

    rs.process_stream_message(message6);
    rs.process_stream_message(message7);
    rs.process_stream_message(message8);

    // topic3 has a message in it, but neither sender1 nor sender2 has
    // participated, so we sort by stream-level recency.
    assert.equal(
        rs.compare_by_recency({user_id: sender1}, {user_id: sender2}, stream3, topic3) > 0,
        true,
    );
    assert.equal(
        rs.compare_by_recency({user_id: sender2}, {user_id: sender1}, stream3, topic3) < 0,
        true,
    );

    assert.equal(rs.compare_by_recency({}, {}, (next_id += 1), ""), 0);

    // new message in topic2
    const message9 = make_stream_message({
        stream_id: stream3,
        topic: topic2,
        sender_id: sender3,
    });

    rs.process_stream_message(message9);

    // Test topic change
    assert.equal(rs.get_topic_recent_senders(stream3, topic3).toString(), "3");
    assert.equal(rs.get_topic_recent_senders(stream3, topic2).toString(), "3,2");

    // message7's topic was changed by user
    message7.topic = topic3;
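
    // Note the calling convention exercised here: the message objects are
    // updated in place first (message7.topic above), and then
    // process_topic_edit is called with the old and new stream/topic
    // coordinates so recent_senders can move its bookkeeping.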

    rs.process_topic_edit({
        message_ids: [message7.id],
        old_stream_id: stream3,
        new_stream_id: stream3,
        old_topic: topic2,
        new_topic: topic3,
    });

    assert.equal(rs.get_topic_recent_senders(stream3, topic3).toString(), "3,2");
    assert.equal(rs.get_topic_recent_senders(stream3, topic2).toString(), "3");

    // Test stream change
    assert.equal(rs.get_topic_recent_senders(stream3, topic3).toString(), "3,2");
    assert.equal(rs.get_topic_recent_senders(stream4, topic3).toString(), "");

    message7.stream_id = stream4;
    message8.stream_id = stream4;

    rs.process_topic_edit({
        message_ids: [message7.id, message8.id],
        old_stream_id: stream3,
        new_stream_id: stream4,
        old_topic: topic3,
        new_topic: topic3,
    });

    assert.equal(rs.get_topic_recent_senders(stream3, topic3).toString(), "");
    assert.equal(rs.get_topic_recent_senders(stream4, topic3).toString(), "3,2");

    // Test stream & topic change
    assert.equal(rs.get_topic_recent_senders(stream4, topic3).toString(), "3,2");
    assert.equal(rs.get_topic_recent_senders(stream5, topic4).toString(), "");

    message7.stream_id = stream5;
    message7.topic = topic4;
    message8.stream_id = stream5;
    message8.topic = topic4;

    rs.process_topic_edit({
        message_ids: [message7.id, message8.id],
        old_stream_id: stream4,
        new_stream_id: stream5,
        old_topic: topic3,
        new_topic: topic4,
    });

    assert.equal(rs.get_topic_recent_senders(stream4, topic3).toString(), "");
    assert.equal(rs.get_topic_recent_senders(stream5, topic4).toString(), "3,2");
    assert.equal(rs.get_topic_recent_senders(stream1, topic1).toString(), "1,2");

    // delete message1 and message5 sent by sender1
    rs.update_topics_of_deleted_message_ids([message1.id, message5.id]);
    assert.equal(rs.get_topic_recent_senders(stream1, topic1).toString(), "2");

    // test that we can remove again, harmlessly
    rs.update_topics_of_deleted_message_ids([message1.id, message5.id]);
    assert.equal(rs.get_topic_recent_senders(stream1, topic1).toString(), "2");

    // remove some more senders
    rs.update_topics_of_deleted_message_ids([message2.id, message3.id, message4.id, message5.id]);
    assert.equal(rs.get_topic_recent_senders(stream1, topic1).toString(), "");

    rs.update_topics_of_deleted_message_ids([message6.id, message7.id, message8.id, message9.id]);
    assert.equal(rs.get_topic_recent_senders(stream1, topic1).toString(), "");
    assert.equal(rs.get_topic_recent_senders(stream2, topic2).toString(), "");
    assert.equal(rs.get_topic_recent_senders(stream3, topic3).toString(), "");

    // Deleting an old message that isn't locally stored: we are just
    // testing that it doesn't raise an error, since no changes should
    // take place in this case.
    rs.update_topics_of_deleted_message_ids([-1]);

    // Comparing on a non-existent topic doesn't crash.
    assert.equal(
        rs.compare_by_recency({user_id: sender2}, {user_id: sender1}, stream3, "bogus") < 0,
        true,
    );
});

test("process_pms", () => {
    const sender1 = 1; // Current user id
    const sender2 = 2;
    const sender3 = 3;

    const user_ids_string = "2,3,4";
    rs.process_private_message({
        to_user_ids: user_ids_string,
        sender_id: sender2,
        id: 1,
    });
    rs.process_private_message({
        to_user_ids: user_ids_string,
        sender_id: sender3,
        id: 2,
    });
    rs.process_private_message({
        to_user_ids: user_ids_string,
        sender_id: sender1,
        id: 3,
    });

    // Recent Conversations displays avatars in the opposite order to this
    // since that was simpler to implement in HTML.
    assert.deepEqual(rs.get_pm_recent_senders(user_ids_string), {
        participants: [1, 3, 2],
        non_participants: [4],
    });

    // Direct message doesn't exist.
    assert.deepEqual(rs.get_pm_recent_senders("1000,2000"), {
        participants: [],
        non_participants: [],
    });

    rs.process_private_message({
        to_user_ids: "1",
        sender_id: sender1,
        id: 4,
    });
    assert.deepEqual(rs.get_pm_recent_senders("1"), {
        participants: [1],
        non_participants: [],
    });
});
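
// For reference, a hedged sketch of the participants/non_participants split
// that the assertions above imply (not the actual implementation in
// src/recent_senders): senders of messages in the conversation come back as
// participants, sorted by their latest message id in descending order, and
// any recipients who never sent a message are non_participants.
//
//     function get_pm_recent_senders_sketch(senders_to_max_id, user_ids_string) {
//         // senders_to_max_id: hypothetical Map of sender_id -> latest message id
//         const participants = [...senders_to_max_id.keys()].sort(
//             (a, b) => senders_to_max_id.get(b) - senders_to_max_id.get(a),
//         );
//         const sender_set = new Set(participants);
//         const non_participants = user_ids_string
//             .split(",")
//             .map(Number)
//             .filter((user_id) => !sender_set.has(user_id));
//         return {participants, non_participants};
//     }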