typeahead: Increase token limit to scan '#'.

As per commit 0965d4e893, the logic has been
switched from returning "topic_jump" from
tokenize_compose_str, which results in closing of
the typeahead to returning the sliced token starting
from '#'.
This change caused some test cases to fail: the token must now be
scanned further back to find '#' rather than '>', so the previous
character limit led to early exits, ultimately causing no typeaheads
to be triggered.
Due to this, the limit has been bumped up from 25
to 40 characters to accommodate the typeahead
triggers in case of large stream and topic names.
This commit is contained in:
Apoorva Pendse 2024-11-18 19:50:37 +05:30
parent 0965d4e893
commit 08d29479c4
1 changed file with 1 addition and 1 deletion

View File

@ -411,7 +411,7 @@ export function tokenize_compose_str(s: string): string {
// after the first character. // after the first character.
let i = s.length; let i = s.length;
let min_i = s.length - 25; let min_i = s.length - 40;
if (min_i < 0) { if (min_i < 0) {
min_i = 0; min_i = 0;
} }