mirror of https://github.com/zulip/zulip.git
markdown: Correctly retrieve indices for repeated matches.
When the same pattern matches multiple times in a topic, the matches cannot be ordered correctly with topic_name.find(match_text), since find() only ever returns the index of the first occurrence; use each match's own start index instead.

Signed-off-by: Zixuan James Li <p359101898@gmail.com>
parent 5b37306c27
commit 4602c34108
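Why the change matters, as a minimal standalone Python sketch (not part of the diff below; the topic string and pattern are made up for illustration): str.find always reports the first occurrence of the matched text, so repeated matches all collapse onto the same index, while each match object from re.finditer carries its own start position.

```python
import re

topic = "#100 fixes #100 and #200"
pattern = re.compile(r"#\d+")

# Broken ordering: find() always returns the first occurrence,
# so the second "#100" is reported at index 0 instead of 11.
broken = [(m.group(0), topic.find(m.group(0))) for m in pattern.finditer(topic)]
print(broken)  # [('#100', 0), ('#100', 0), ('#200', 20)]

# Correct ordering: each match object knows where it actually matched.
correct = [(m.group(0), m.start()) for m in pattern.finditer(topic)]
print(correct)  # [('#100', 0), ('#100', 11), ('#200', 20)]
```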
@@ -230,3 +230,44 @@ run_test("topic links", () => {
         },
     ]);
 });
+
+run_test("topic links repeated", () => {
+    // Links generated from repeated patterns should preserve the order.
+    const topic =
+        "#foo101 https://google.com #foo102 #foo103 https://google.com #foo101 #foo102 #foo103";
+    const topic_links = markdown.get_topic_links({topic, get_linkifier_map});
+    assert.deepEqual(topic_links, [
+        {
+            text: "#foo101",
+            url: "http://foo.com/101",
+        },
+        {
+            text: "https://google.com",
+            url: "https://google.com",
+        },
+        {
+            text: "#foo102",
+            url: "http://foo.com/102",
+        },
+        {
+            text: "#foo103",
+            url: "http://foo.com/103",
+        },
+        {
+            text: "https://google.com",
+            url: "https://google.com",
+        },
+        {
+            text: "#foo101",
+            url: "http://foo.com/101",
+        },
+        {
+            text: "#foo102",
+            url: "http://foo.com/102",
+        },
+        {
+            text: "#foo103",
+            url: "http://foo.com/103",
+        },
+    ]);
+});
@@ -296,17 +296,15 @@ export function get_topic_links({topic, get_linkifier_map}) {
             }
-            links.push({url: link_url, text: match[0], index: topic.indexOf(match[0])});
+            // We store the starting index as well, to sort the order of occurrence of the links
+            // in the topic, similar to the logic implemented in zerver/lib/markdown/__init__.py
+            links.push({url: link_url, text: match[0], index: match.index});
         }
     }

     // Also make raw URLs navigable
     const url_re = /\b(https?:\/\/[^\s<]+[^\s"'),.:;<\]])/g; // Slightly modified from third/marked.js
-    const matches = topic.match(url_re);
-    if (matches) {
-        for (const match of matches) {
-            links.push({url: match, text: match, index: topic.indexOf(match)});
-        }
+    let match;
+    while ((match = url_re.exec(topic)) !== null) {
+        links.push({url: match[0], text: match[0], index: match.index});
     }
     links.sort((a, b) => a.index - b.index);
     for (const match of links) {
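The comment in the hunk above points at the shared shape of the fix: both the web client and zerver/lib/markdown/__init__.py now tag every link with the start index of its own match, merge linkifier links with raw-URL links, sort by that index, and drop it before returning. A simplified Python sketch of that shape, with an assumed example linkifier and URL regex standing in for Zulip's real helpers:

```python
import re
from typing import Dict, List, Union

def ordered_topic_links(topic_name: str) -> List[Dict[str, str]]:
    # Illustrative stand-ins for Zulip's linkifier table and URL regex.
    linkifier = re.compile(r"#(?P<id>\d+)")
    url_re = re.compile(r"https?://\S+")

    matches: List[Dict[str, Union[str, int]]] = []
    for m in linkifier.finditer(topic_name):
        matches.append(
            dict(
                url=f"https://example.com/ticket/{m.group('id')}",
                text=m.group(0),
                index=m.start(),
            )
        )
    for m in url_re.finditer(topic_name):
        matches.append(dict(url=m.group(0), text=m.group(0), index=m.start()))

    # Sort by position in the topic, then strip the helper field.
    matches.sort(key=lambda match: match["index"])
    return [{"url": str(match["url"]), "text": str(match["text"])} for match in matches]

print(ordered_topic_links("#111 https://google.com #111"))
# [{'url': 'https://example.com/ticket/111', 'text': '#111'},
#  {'url': 'https://google.com', 'text': 'https://google.com'},
#  {'url': 'https://example.com/ticket/111', 'text': '#111'}]
```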
@@ -2414,13 +2414,20 @@ def topic_links(linkifiers_key: int, topic_name: str) -> List[Dict[str, str]]:
             dict(
                 url=url_format_string % match_details,
                 text=match_text,
-                index=topic_name.find(match_text),
+                index=m.start(),
             )
         ]

+    pos = 0
     # Also make raw URLs navigable.
-    for sub_string in basic_link_splitter.split(topic_name):
-        link_match = re.match(get_web_link_regex(), sub_string)
+    while pos < len(topic_name):
+        # Assuming that basic_link_splitter matches 1 character,
+        # we match segments of the string for URL divided by the matched character.
+        next_split = basic_link_splitter.search(topic_name, pos)
+        end = next_split.start() if next_split is not None else len(topic_name)
+        # We have to match the substring because LINK_REGEX
+        # matches the start of the entire string with "^"
+        link_match = re.match(get_web_link_regex(), topic_name[pos:end])
         if link_match:
             actual_match_url = link_match.group("url")
             result = urlsplit(actual_match_url)
@@ -2431,9 +2438,9 @@ def topic_links(linkifiers_key: int, topic_name: str) -> List[Dict[str, str]]:
                 url = result._replace(scheme="https").geturl()
             else:
                 url = actual_match_url
-            matches.append(
-                dict(url=url, text=actual_match_url, index=topic_name.find(actual_match_url))
-            )
+            matches.append(dict(url=url, text=actual_match_url, index=pos))
+        # Move past the next split point, and start matching the URL from there
+        pos = end + 1

     # In order to preserve the order in which the links occur, we sort the matched text
     # based on its starting index in the topic. We pop the index field before returning.
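The while loop in the two hunks above replaces the old split()-based pass: it walks the topic with a cursor, matches the URL regex against the segment up to the next splitter hit, and records the cursor position as the link's index. A reduced sketch of just that scanning loop; basic_link_splitter and the web-link regex below are simplified stand-ins, not Zulip's real patterns:

```python
import re
from typing import Dict, List, Union

# Assumed stand-ins: the real basic_link_splitter matches a single splitting
# character, and get_web_link_regex() returns a much larger "^"-anchored pattern.
basic_link_splitter = re.compile(r" ")
web_link_regex = re.compile(r"^(?P<url>https?://\S+)")

def raw_url_matches(topic_name: str) -> List[Dict[str, Union[str, int]]]:
    matches: List[Dict[str, Union[str, int]]] = []
    pos = 0
    while pos < len(topic_name):
        # The segment runs from the cursor to the next splitter character
        # (or to the end of the topic if there is none).
        next_split = basic_link_splitter.search(topic_name, pos)
        end = next_split.start() if next_split is not None else len(topic_name)
        # The URL regex is anchored at the start, so match the segment only.
        link_match = web_link_regex.match(topic_name[pos:end])
        if link_match:
            url = link_match.group("url")
            matches.append(dict(url=url, text=url, index=pos))
        # Resume scanning just past the splitter character.
        pos = end + 1
    return matches

print(raw_url_matches("see https://google.com and https://zulip.com"))
# [{'url': 'https://google.com', 'text': 'https://google.com', 'index': 4},
#  {'url': 'https://zulip.com', 'text': 'https://zulip.com', 'index': 27}]
```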
@@ -1340,6 +1340,21 @@ class MarkdownTest(ZulipTestCase):
             ],
         )

+        msg.set_topic_name("#111 https://google.com #111 #222 #111 https://google.com #222")
+        converted_topic = topic_links(realm.id, msg.topic_name())
+        self.assertEqual(
+            converted_topic,
+            [
+                {"url": "https://trac.example.com/ticket/111", "text": "#111"},
+                {"url": "https://google.com", "text": "https://google.com"},
+                {"url": "https://trac.example.com/ticket/111", "text": "#111"},
+                {"url": "https://trac.example.com/ticket/222", "text": "#222"},
+                {"url": "https://trac.example.com/ticket/111", "text": "#111"},
+                {"url": "https://google.com", "text": "https://google.com"},
+                {"url": "https://trac.example.com/ticket/222", "text": "#222"},
+            ],
+        )
+
         msg.set_topic_name("#444 #555 #666")
         converted_topic = topic_links(realm.id, msg.topic_name())
         self.assertEqual(