mirror of https://github.com/zulip/zulip.git
Split out markdown.js from echo.js.
The new module handles markdown rendering. The code left behind in echo.js does local-echo kind of things like reifying message ids.
This commit is contained in:
parent 61d5d41067
commit 0a0f567aeb
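For orientation, here is a minimal sketch of the layering this commit establishes: markdown.js owns rendering and exposes it as a module object, while echo.js keeps the local-echo bookkeeping and calls into it. This is illustrative only (the function bodies are placeholders); the real modules appear in the diff below.

// Minimal sketch of the post-split layering; not the actual Zulip source.
var markdown = (function () {
    var exports = {};
    exports.apply_markdown = function (message) {
        // Rendering concerns (marked.js configuration, mentions, realm
        // filters) live here after the split.
        message.content = '<p>' + message.raw_content + '</p>';
    };
    return exports;
}());

var echo = (function () {
    var exports = {};
    exports.try_deliver_locally = function (message) {
        // echo.js keeps local-echo concerns (local ids, acking, reifying
        // message ids) and delegates rendering to the new module.
        markdown.apply_markdown(message);
        return message;
    };
    return exports;
}());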
@@ -128,6 +128,7 @@
 "templates": false,
 "alert_words": false,
 "fenced_code": false,
+"markdown": false,
 "echo": false,
 "localstorage": false,
 "localStorage": false,
@@ -18,24 +18,24 @@ sender of a message, and they are (ideally) identical to the backend
 rendering.

 The JavaScript markdown implementation has a function,
-`echo.contains_bugdown`, that is used to check whether a message
+`markdown.contains_bugdown`, that is used to check whether a message
 contains any syntax that needs to be rendered to HTML on the backend.
-If `echo.contains_bugdown` returns true, the frontend simply won't
+If `markdown.contains_bugdown` returns true, the frontend simply won't
 echo the message for the sender until it receives the rendered HTML
-from the backend. If there is a bug where `echo.contains_bugdown`
+from the backend. If there is a bug where `markdown.contains_bugdown`
 returns false incorrectly, the frontend will discover this when the
 backend returns the newly sent message, and will update the HTML based
 on the authoritative backend rendering (which would cause a change in
 the rendering that is visible only to the sender shortly after a
 message is sent). As a result, we try to make sure that
-`echo.contains_bugdown` is always correct.
+`markdown.contains_bugdown` is always correct.

 ## Testing

 The Python-Markdown implementation is tested by
 `zerver/tests/test_bugdown.py`, and the marked.js implementation and
-`echo.contains_bugdown` are tested by
-`frontend_tests/node_tests/echo.js`. A shared set of fixed test data
+`markdown.contains_bugdown` are tested by
+`frontend_tests/node_tests/markdown.js`. A shared set of fixed test data
 ("test fixtures") is present in `zerver/fixtures/bugdown-data.json`,
 and is automatically used by both test suites; as a result, it is the
 preferred place to add new tests for Zulip's markdown system.
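The paragraph above is the heart of the local-echo decision; the sketch below restates it as standalone code. The regex list mirrors bugdown_re from the new static/js/markdown.js shown later in this diff; maybe_echo and its echo_locally callback are hypothetical stand-ins for the real echo path.

// Sketch of the local-echo gate described above (regexes copied from the
// markdown.js hunk below; helper names here are illustrative).
var bugdown_re = [
    /[^\s]*(?:\.bmp|\.gif|\.jpg|\.jpeg|\.png|\.webp)\s+/m,
    /[^\s]*(?:\.bmp|\.gif|\.jpg|\.jpeg|\.png|\.webp)$/m,
    /[^\s]*(?:twitter|youtube).com\/[^\s]*/,
];

function contains_bugdown(content) {
    // If any regex matches, the message may need backend-only rendering.
    return bugdown_re.some(function (re) {
        return re.test(content);
    });
}

function maybe_echo(content, echo_locally) {
    if (contains_bugdown(content)) {
        return false; // wait for the backend's authoritative rendering
    }
    echo_locally(content);
    return true;
}

maybe_echo('plain text message', function () {});                   // true
maybe_echo('see https://youtube.com/watch?v=abc', function () {});  // false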
@@ -58,8 +58,8 @@ When changing Zulip's markdown syntax, you need to update several
 places:

 * The backend markdown processor (`zerver/lib/bugdown/__init__.py`).
-* The frontend markdown processor (`static/js/echo.js` and sometimes
-  `static/third/marked/lib/marked.js`), or `echo.contains_bugdown` if
+* The frontend markdown processor (`static/js/markdown.js` and sometimes
+  `static/third/marked/lib/marked.js`), or `markdown.contains_bugdown` if
   your changes won't be supported in the frontend processor.
 * If desired, the typeahead logic in `static/js/composebox_typeahead.js`.
 * The test suite, probably via adding entries to `zerver/fixtures/bugdown-data.json`.
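Since zerver/fixtures/bugdown-data.json is the preferred place for new markdown tests, here is a sketch of what a fixture entry and its node-side check look like. The keys input, expected, and bugdown_matches_marked come from frontend_tests/node_tests/markdown.js below; the concrete entry and the fake_apply_markdown stand-in are illustrative, not real fixture data.

// Illustrative bugdown-data.json-style entry plus the assertion the node
// suite applies to it (see the markdown.js node tests in this diff).
var assert = require('assert');

var fixture_entry = {
    input: 'This is **bold** text',
    expected: '<p>This is <strong>bold</strong> text</p>',
    bugdown_matches_marked: true,
};

// In the real suite, markdown.apply_markdown(message) fills message.content;
// a trivial stand-in renderer keeps this sketch self-contained.
function fake_apply_markdown(message) {
    message.content = '<p>' +
        message.raw_content.replace(/\*\*(.+?)\*\*/, '<strong>$1</strong>') +
        '</p>';
}

var message = {raw_content: fixture_entry.input};
fake_apply_markdown(message);
if (fixture_entry.bugdown_matches_marked) {
    assert.equal(message.content, fixture_entry.expected);
}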
@@ -32,6 +32,9 @@ set_global('echo', {
|
|||
process_from_server: function (messages) {
|
||||
return messages;
|
||||
},
|
||||
});
|
||||
|
||||
set_global('markdown', {
|
||||
set_realm_filters: noop,
|
||||
});
|
||||
|
||||
|
|
|
@@ -98,7 +98,9 @@ var social = {
|
|||
stream_data.add_sub('Denmark', denmark);
|
||||
stream_data.add_sub('social', social);
|
||||
|
||||
var echo = require('js/echo.js');
|
||||
var markdown = require('js/markdown.js');
|
||||
|
||||
markdown.initialize();
|
||||
|
||||
var bugdown_data = JSON.parse(fs.readFileSync(path.join(__dirname, '../../zerver/fixtures/bugdown-data.json'), 'utf8', 'r'));
|
||||
|
||||
|
@@ -141,11 +143,11 @@ var bugdown_data = JSON.parse(fs.readFileSync(path.join(__dirname, '../../zerver
|
|||
];
|
||||
|
||||
no_markup.forEach(function (content) {
|
||||
assert.equal(echo.contains_bugdown(content), false);
|
||||
assert.equal(markdown.contains_bugdown(content), false);
|
||||
});
|
||||
|
||||
markup.forEach(function (content) {
|
||||
assert.equal(echo.contains_bugdown(content), true);
|
||||
assert.equal(markdown.contains_bugdown(content), true);
|
||||
});
|
||||
}());
|
||||
|
||||
|
@@ -153,7 +155,7 @@ var bugdown_data = JSON.parse(fs.readFileSync(path.join(__dirname, '../../zerver
|
|||
var tests = bugdown_data.regular_tests;
|
||||
tests.forEach(function (test) {
|
||||
var message = {raw_content: test.input};
|
||||
echo.apply_markdown(message);
|
||||
markdown.apply_markdown(message);
|
||||
var output = message.content;
|
||||
|
||||
if (test.bugdown_matches_marked) {
|
||||
|
@@ -166,15 +168,15 @@ var bugdown_data = JSON.parse(fs.readFileSync(path.join(__dirname, '../../zerver
|
|||
|
||||
(function test_message_flags() {
|
||||
var message = {raw_content: '@**Leo**'};
|
||||
echo.apply_markdown(message);
|
||||
markdown.apply_markdown(message);
|
||||
assert(!_.contains(message.flags, 'mentioned'));
|
||||
|
||||
message = {raw_content: '@**Cordelia Lear**'};
|
||||
echo.apply_markdown(message);
|
||||
markdown.apply_markdown(message);
|
||||
assert(_.contains(message.flags, 'mentioned'));
|
||||
|
||||
message = {raw_content: '@**all**'};
|
||||
echo.apply_markdown(message);
|
||||
markdown.apply_markdown(message);
|
||||
assert(_.contains(message.flags, 'mentioned'));
|
||||
}());
|
||||
|
||||
|
@@ -235,7 +237,7 @@ var bugdown_data = JSON.parse(fs.readFileSync(path.join(__dirname, '../../zerver
|
|||
var expected = test_case.expected;
|
||||
|
||||
var message = {raw_content: input};
|
||||
echo.apply_markdown(message);
|
||||
markdown.apply_markdown(message);
|
||||
var output = message.content;
|
||||
|
||||
assert.equal(expected, output);
|
||||
|
@@ -245,34 +247,34 @@ var bugdown_data = JSON.parse(fs.readFileSync(path.join(__dirname, '../../zerver
|
|||
|
||||
(function test_subject_links() {
|
||||
var message = {type: 'stream', subject: "No links here"};
|
||||
echo._add_subject_links(message);
|
||||
markdown.add_subject_links(message);
|
||||
assert.equal(message.subject_links.length, []);
|
||||
|
||||
message = {type: 'stream', subject: "One #123 link here"};
|
||||
echo._add_subject_links(message);
|
||||
markdown.add_subject_links(message);
|
||||
assert.equal(message.subject_links.length, 1);
|
||||
assert.equal(message.subject_links[0], "https://trac.zulip.net/ticket/123");
|
||||
|
||||
message = {type: 'stream', subject: "Two #123 #456 link here"};
|
||||
echo._add_subject_links(message);
|
||||
markdown.add_subject_links(message);
|
||||
assert.equal(message.subject_links.length, 2);
|
||||
assert.equal(message.subject_links[0], "https://trac.zulip.net/ticket/123");
|
||||
assert.equal(message.subject_links[1], "https://trac.zulip.net/ticket/456");
|
||||
|
||||
message = {type: 'stream', subject: "New ZBUG_123 link here"};
|
||||
echo._add_subject_links(message);
|
||||
markdown.add_subject_links(message);
|
||||
assert.equal(message.subject_links.length, 1);
|
||||
assert.equal(message.subject_links[0], "https://trac2.zulip.net/ticket/123");
|
||||
|
||||
|
||||
message = {type: 'stream', subject: "New ZBUG_123 with #456 link here"};
|
||||
echo._add_subject_links(message);
|
||||
markdown.add_subject_links(message);
|
||||
assert.equal(message.subject_links.length, 2);
|
||||
assert(message.subject_links.indexOf("https://trac2.zulip.net/ticket/123") !== -1);
|
||||
assert(message.subject_links.indexOf("https://trac.zulip.net/ticket/456") !== -1);
|
||||
|
||||
message = {type: 'stream', subject: "One ZGROUP_123:45 link here"};
|
||||
echo._add_subject_links(message);
|
||||
markdown.add_subject_links(message);
|
||||
assert.equal(message.subject_links.length, 1);
|
||||
assert.equal(message.subject_links[0], "https://zone_45.zulip.net/ticket/123");
|
||||
}());
|
||||
|
@@ -281,8 +283,8 @@ var bugdown_data = JSON.parse(fs.readFileSync(path.join(__dirname, '../../zerver
|
|||
var input = "/me is testing this";
|
||||
var message = {subject: "No links here", raw_content: input};
|
||||
message.flags = ['read'];
|
||||
echo.apply_markdown(message);
|
||||
echo._add_message_flags(message);
|
||||
markdown.apply_markdown(message);
|
||||
markdown.add_message_flags(message);
|
||||
|
||||
assert.equal(message.flags.length, 2);
|
||||
assert(message.flags.indexOf('read') !== -1);
|
||||
|
@@ -290,23 +292,23 @@ var bugdown_data = JSON.parse(fs.readFileSync(path.join(__dirname, '../../zerver
|
|||
|
||||
input = "testing this @**all** @**Cordelia Lear**";
|
||||
message = {subject: "No links here", raw_content: input};
|
||||
echo.apply_markdown(message);
|
||||
echo._add_message_flags(message);
|
||||
markdown.apply_markdown(message);
|
||||
markdown.add_message_flags(message);
|
||||
|
||||
assert.equal(message.flags.length, 1);
|
||||
assert(message.flags.indexOf('mentioned') !== -1);
|
||||
|
||||
input = "test @all";
|
||||
message = {subject: "No links here", raw_content: input};
|
||||
echo.apply_markdown(message);
|
||||
echo._add_message_flags(message);
|
||||
markdown.apply_markdown(message);
|
||||
markdown.add_message_flags(message);
|
||||
assert.equal(message.flags.length, 1);
|
||||
assert(message.flags.indexOf('mentioned') !== -1);
|
||||
|
||||
input = "test @any";
|
||||
message = {subject: "No links here", raw_content: input};
|
||||
echo.apply_markdown(message);
|
||||
echo._add_message_flags(message);
|
||||
markdown.apply_markdown(message);
|
||||
markdown.add_message_flags(message);
|
||||
assert.equal(message.flags.length, 0);
|
||||
assert(message.flags.indexOf('mentioned') === -1);
|
||||
}());
|
|
@@ -762,7 +762,7 @@ $(function () {
|
|||
if (message.length === 0) {
|
||||
$("#preview_content").html(i18n.t("Nothing to preview"));
|
||||
} else {
|
||||
if (echo.contains_bugdown(message)) {
|
||||
if (markdown.contains_bugdown(message)) {
|
||||
var spinner = $("#markdown_preview_spinner").expectOne();
|
||||
loading.make_indicator(spinner);
|
||||
} else {
|
||||
|
@@ -771,22 +771,22 @@ $(function () {
|
|||
// marked.js frontend processor, we render using the
|
||||
// frontend markdown processor message (but still
|
||||
// render server-side to ensure the preview is
|
||||
// accurate; if the `echo.contains_bugdown` logic is
|
||||
// accurate; if the `markdown.contains_bugdown` logic is
|
||||
// incorrect, users will see a brief flicker).
|
||||
$("#preview_content").html(echo.apply_markdown(message));
|
||||
$("#preview_content").html(markdown.apply_markdown(message));
|
||||
}
|
||||
channel.post({
|
||||
url: '/json/messages/render',
|
||||
idempotent: true,
|
||||
data: {content: message},
|
||||
success: function (response_data) {
|
||||
if (echo.contains_bugdown(message)) {
|
||||
if (markdown.contains_bugdown(message)) {
|
||||
loading.destroy_indicator($("#markdown_preview_spinner"));
|
||||
}
|
||||
$("#preview_content").html(response_data.rendered);
|
||||
},
|
||||
error: function () {
|
||||
if (echo.contains_bugdown(message)) {
|
||||
if (markdown.contains_bugdown(message)) {
|
||||
loading.destroy_indicator($("#markdown_preview_spinner"));
|
||||
}
|
||||
$("#preview_content").html(i18n.t("Failed to generate preview"));
|
||||
|
|
|
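The two compose.js hunks above implement the markdown preview flow. Below is a condensed, hedged sketch of that logic as a single function; it assumes the same Zulip globals ($, i18n, loading, channel, markdown) and wraps the content in a message object to match apply_markdown's signature as defined in markdown.js, whereas the real code runs inline in a handler.

// Hypothetical condensation of the preview flow from the hunks above.
function render_markdown_preview(content) {
    if (content.length === 0) {
        $("#preview_content").html(i18n.t("Nothing to preview"));
        return;
    }
    if (markdown.contains_bugdown(content)) {
        // Backend-only syntax: show a spinner until the server render arrives.
        loading.make_indicator($("#markdown_preview_spinner").expectOne());
    } else {
        // Otherwise show the client-side render immediately; the server
        // response below still overwrites it, normally with identical HTML.
        var message = {raw_content: content};
        markdown.apply_markdown(message);
        $("#preview_content").html(message.content);
    }
    channel.post({
        url: '/json/messages/render',
        idempotent: true,
        data: {content: content},
        success: function (response_data) {
            if (markdown.contains_bugdown(content)) {
                loading.destroy_indicator($("#markdown_preview_spinner"));
            }
            $("#preview_content").html(response_data.rendered);
        },
        error: function () {
            if (markdown.contains_bugdown(content)) {
                loading.destroy_indicator($("#markdown_preview_spinner"));
            }
            $("#preview_content").html(i18n.t("Failed to generate preview"));
        },
    });
}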
@@ -182,7 +182,7 @@ exports.setup_page = function (callback) {
|
|||
|
||||
};
|
||||
|
||||
echo.apply_markdown(formatted);
|
||||
markdown.apply_markdown(formatted);
|
||||
} else {
|
||||
var emails = util.extract_pm_recipients(draft.private_message_recipient);
|
||||
var recipients = _.map(emails, function (email) {
|
||||
|
@@ -200,7 +200,7 @@ exports.setup_page = function (callback) {
|
|||
recipients: recipients,
|
||||
raw_content: draft.content,
|
||||
};
|
||||
echo.apply_markdown(formatted);
|
||||
markdown.apply_markdown(formatted);
|
||||
}
|
||||
return formatted;
|
||||
});
|
||||
|
|
|
@@ -1,69 +1,11 @@
|
|||
// This contains zulip's frontend markdown implementation; see
|
||||
// docs/markdown.md for docs on our Markdown syntax.
|
||||
|
||||
var echo = (function () {
|
||||
|
||||
var exports = {};
|
||||
|
||||
var waiting_for_id = {};
|
||||
var waiting_for_ack = {};
|
||||
var realm_filter_map = {};
|
||||
var realm_filter_list = [];
|
||||
var home_view_loaded = false;
|
||||
|
||||
// Regexes that match some of our common bugdown markup
|
||||
var bugdown_re = [
|
||||
// Inline image previews, check for contiguous chars ending in image suffix
|
||||
// To keep the below regexes simple, split them out for the end-of-message case
|
||||
/[^\s]*(?:\.bmp|\.gif|\.jpg|\.jpeg|\.png|\.webp)\s+/m,
|
||||
/[^\s]*(?:\.bmp|\.gif|\.jpg|\.jpeg|\.png|\.webp)$/m,
|
||||
// Twitter and youtube links are given previews
|
||||
/[^\s]*(?:twitter|youtube).com\/[^\s]*/,
|
||||
];
|
||||
|
||||
exports.contains_bugdown = function contains_bugdown(content) {
|
||||
// Try to guess whether or not a message has bugdown in it
|
||||
// If it doesn't, we can immediately render it client-side
|
||||
var markedup = _.find(bugdown_re, function (re) {
|
||||
return re.test(content);
|
||||
});
|
||||
return markedup !== undefined;
|
||||
};
|
||||
|
||||
function push_uniquely(lst, elem) {
|
||||
if (!_.contains(lst, elem)) {
|
||||
lst.push(elem);
|
||||
}
|
||||
}
|
||||
|
||||
exports.apply_markdown = function apply_markdown(message) {
|
||||
if (message.flags === undefined) {
|
||||
message.flags = [];
|
||||
}
|
||||
|
||||
// Our python-markdown processor appends two \n\n to input
|
||||
var options = {
|
||||
userMentionHandler: function (name) {
|
||||
var person = people.get_by_name(name);
|
||||
if (person !== undefined) {
|
||||
if (people.is_my_user_id(person.user_id)) {
|
||||
push_uniquely(message.flags, 'mentioned');
|
||||
}
|
||||
return '<span class="user-mention" data-user-id="' + person.user_id + '">' +
|
||||
'@' + person.full_name +
|
||||
'</span>';
|
||||
} else if (name === 'all' || name === 'everyone') {
|
||||
push_uniquely(message.flags, 'mentioned');
|
||||
return '<span class="user-mention" data-user-id="*">' +
|
||||
'@' + name +
|
||||
'</span>';
|
||||
}
|
||||
return undefined;
|
||||
},
|
||||
};
|
||||
message.content = marked(message.raw_content + '\n\n', options).trim();
|
||||
};
|
||||
|
||||
function resend_message(message, row) {
|
||||
message.content = message.raw_content;
|
||||
var retry_spinner = row.find('.refresh-failed-message');
|
||||
|
@@ -94,48 +36,6 @@ function truncate_precision(float) {
|
|||
return parseFloat(float.toFixed(3));
|
||||
}
|
||||
|
||||
function add_message_flags(message) {
|
||||
// Note: mention flags are set in apply_markdown()
|
||||
|
||||
if (message.raw_content.indexOf('/me ') === 0 &&
|
||||
message.content.indexOf('<p>') === 0 &&
|
||||
message.content.lastIndexOf('</p>') === message.content.length - 4) {
|
||||
message.flags.push('is_me_message');
|
||||
}
|
||||
}
|
||||
|
||||
function add_subject_links(message) {
|
||||
if (message.type !== 'stream') {
|
||||
message.subject_links = [];
|
||||
return;
|
||||
}
|
||||
var subject = message.subject;
|
||||
var links = [];
|
||||
_.each(realm_filter_list, function (realm_filter) {
|
||||
var pattern = realm_filter[0];
|
||||
var url = realm_filter[1];
|
||||
var match;
|
||||
while ((match = pattern.exec(subject)) !== null) {
|
||||
var link_url = url;
|
||||
var matched_groups = match.slice(1);
|
||||
var i = 0;
|
||||
while (i < matched_groups.length) {
|
||||
var matched_group = matched_groups[i];
|
||||
var current_group = i + 1;
|
||||
var back_ref = "\\" + current_group;
|
||||
link_url = link_url.replace(back_ref, matched_group);
|
||||
i += 1;
|
||||
}
|
||||
links.push(link_url);
|
||||
}
|
||||
});
|
||||
message.subject_links = links;
|
||||
}
|
||||
|
||||
// For unit testing
|
||||
exports._add_subject_links = add_subject_links;
|
||||
exports._add_message_flags = add_message_flags;
|
||||
|
||||
function get_next_local_id() {
|
||||
var local_id_increment = 0.01;
|
||||
var latest = page_params.max_message_id;
|
||||
|
@@ -157,8 +57,10 @@ function insert_local_message(message_request, local_id) {
|
|||
message.flags = ['read']; // we may add more flags later
|
||||
|
||||
message.raw_content = message.content;
|
||||
|
||||
// NOTE: This will parse synchronously. We're not using the async pipeline
|
||||
exports.apply_markdown(message);
|
||||
markdown.apply_markdown(message);
|
||||
|
||||
message.content_type = 'text/html';
|
||||
message.sender_email = people.my_current_email();
|
||||
message.sender_full_name = people.my_full_name();
|
||||
|
@@ -166,8 +68,8 @@ function insert_local_message(message_request, local_id) {
|
|||
message.timestamp = new XDate().getTime() / 1000;
|
||||
message.local_id = local_id;
|
||||
message.id = message.local_id;
|
||||
add_message_flags(message);
|
||||
add_subject_links(message);
|
||||
markdown.add_message_flags(message);
|
||||
markdown.add_subject_links(message);
|
||||
|
||||
waiting_for_id[message.local_id] = message;
|
||||
waiting_for_ack[message.local_id] = message;
|
||||
|
@@ -207,7 +109,7 @@ exports.try_deliver_locally = function try_deliver_locally(message_request) {
|
|||
return undefined;
|
||||
}
|
||||
|
||||
if (exports.contains_bugdown(message_request.content)) {
|
||||
if (markdown.contains_bugdown(message_request.content)) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
|
@@ -226,7 +128,7 @@ exports.edit_locally = function edit_locally(message, raw_content, new_topic) {
|
|||
stream_data.process_message_for_recent_topics(message);
|
||||
}
|
||||
|
||||
exports.apply_markdown(message);
|
||||
markdown.apply_markdown(message);
|
||||
|
||||
// We don't handle unread counts since local messages must be sent by us
|
||||
|
||||
|
@@ -311,231 +213,7 @@ function edit_failed_message(message) {
|
|||
}
|
||||
|
||||
|
||||
function escape(html, encode) {
|
||||
return html
|
||||
.replace(!encode ? /&(?!#?\w+;)/g : /&/g, '&')
|
||||
.replace(/</g, '<')
|
||||
.replace(/>/g, '>')
|
||||
.replace(/"/g, '"')
|
||||
.replace(/'/g, ''');
|
||||
}
|
||||
|
||||
function handleUnicodeEmoji(unicode_emoji) {
|
||||
var hex_value = unicode_emoji.codePointAt(0).toString(16);
|
||||
if (emoji.emojis_by_unicode.hasOwnProperty(hex_value)) {
|
||||
var emoji_url = emoji.emojis_by_unicode[hex_value];
|
||||
return '<img alt="' + unicode_emoji + '"' +
|
||||
' class="emoji" src="' + emoji_url + '"' +
|
||||
' title="' + unicode_emoji + '">';
|
||||
}
|
||||
return unicode_emoji;
|
||||
}
|
||||
|
||||
function handleEmoji(emoji_name) {
|
||||
var input_emoji = ':' + emoji_name + ":";
|
||||
var emoji_url;
|
||||
if (emoji.realm_emojis.hasOwnProperty(emoji_name)) {
|
||||
emoji_url = emoji.realm_emojis[emoji_name].emoji_url;
|
||||
return '<img alt="' + input_emoji + '"' +
|
||||
' class="emoji" src="' + emoji_url + '"' +
|
||||
' title="' + input_emoji + '">';
|
||||
} else if (emoji.emojis_by_name.hasOwnProperty(emoji_name)) {
|
||||
emoji_url = emoji.emojis_by_name[emoji_name];
|
||||
return '<img alt="' + input_emoji + '"' +
|
||||
' class="emoji" src="' + emoji_url + '"' +
|
||||
' title="' + input_emoji + '">';
|
||||
}
|
||||
return input_emoji;
|
||||
}
|
||||
|
||||
function handleAvatar(email) {
|
||||
return '<img alt="' + email + '"' +
|
||||
' class="message_body_gravatar" src="/avatar/' + email + '?s=30"' +
|
||||
' title="' + email + '">';
|
||||
}
|
||||
|
||||
function handleStream(streamName) {
|
||||
var stream = stream_data.get_sub(streamName);
|
||||
if (stream === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
return '<a class="stream" data-stream-id="' + stream.stream_id + '" ' +
|
||||
'href="' + window.location.origin + '/#narrow/stream/' +
|
||||
hash_util.encodeHashComponent(stream.name) + '"' +
|
||||
'>' + '#' + stream.name + '</a>';
|
||||
|
||||
}
|
||||
|
||||
function handleRealmFilter(pattern, matches) {
|
||||
var url = realm_filter_map[pattern];
|
||||
|
||||
var current_group = 1;
|
||||
_.each(matches, function (match) {
|
||||
var back_ref = "\\" + current_group;
|
||||
url = url.replace(back_ref, match);
|
||||
current_group += 1;
|
||||
});
|
||||
|
||||
return url;
|
||||
}
|
||||
|
||||
function handleTex(tex, fullmatch) {
|
||||
try {
|
||||
return katex.renderToString(tex);
|
||||
} catch (ex) {
|
||||
if (ex.message.startsWith('KaTeX parse error')) { // TeX syntax error
|
||||
return '<span class="tex-error">' + escape(fullmatch) + '</span>';
|
||||
}
|
||||
blueslip.error(ex);
|
||||
}
|
||||
}
|
||||
|
||||
function python_to_js_filter(pattern, url) {
|
||||
// Converts a python named-group regex to a javascript-compatible numbered
|
||||
// group regex... with a regex!
|
||||
var named_group_re = /\(?P<([^>]+?)>/g;
|
||||
var match = named_group_re.exec(pattern);
|
||||
var current_group = 1;
|
||||
while (match) {
|
||||
var name = match[1];
|
||||
// Replace named group with regular matching group
|
||||
pattern = pattern.replace('(?P<' + name + '>', '(');
|
||||
// Replace named reference in url to numbered reference
|
||||
url = url.replace('%(' + name + ')s', '\\' + current_group);
|
||||
|
||||
match = named_group_re.exec(pattern);
|
||||
|
||||
current_group += 1;
|
||||
}
|
||||
// Convert any python in-regex flags to RegExp flags
|
||||
var js_flags = 'g';
|
||||
var inline_flag_re = /\(\?([iLmsux]+)\)/;
|
||||
match = inline_flag_re.exec(pattern);
|
||||
|
||||
// JS regexes only support i (case insensitivity) and m (multiline)
|
||||
// flags, so keep those and ignore the rest
|
||||
if (match) {
|
||||
var py_flags = match[1].split("");
|
||||
_.each(py_flags, function (flag) {
|
||||
if ("im".indexOf(flag) !== -1) {
|
||||
js_flags += flag;
|
||||
}
|
||||
});
|
||||
pattern = pattern.replace(inline_flag_re, "");
|
||||
}
|
||||
return [new RegExp(pattern, js_flags), url];
|
||||
}
|
||||
|
||||
exports.set_realm_filters = function set_realm_filters(realm_filters) {
|
||||
// Update the marked parser with our particular set of realm filters
|
||||
if (!feature_flags.local_echo) {
|
||||
return;
|
||||
}
|
||||
|
||||
realm_filter_map = {};
|
||||
realm_filter_list = [];
|
||||
|
||||
var marked_rules = [];
|
||||
_.each(realm_filters, function (realm_filter) {
|
||||
var pattern = realm_filter[0];
|
||||
var url = realm_filter[1];
|
||||
var js_filters = python_to_js_filter(pattern, url);
|
||||
|
||||
realm_filter_map[js_filters[0]] = js_filters[1];
|
||||
realm_filter_list.push([js_filters[0], js_filters[1]]);
|
||||
marked_rules.push(js_filters[0]);
|
||||
});
|
||||
|
||||
marked.InlineLexer.rules.zulip.realm_filters = marked_rules;
|
||||
};
|
||||
|
||||
$(function () {
|
||||
function disable_markdown_regex(rules, name) {
|
||||
rules[name] = {exec: function () {
|
||||
return false;
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
// Configure the marked markdown parser for our usage
|
||||
var r = new marked.Renderer();
|
||||
|
||||
// No <code> around our code blocks instead a codehilite <div> and disable
|
||||
// class-specific highlighting.
|
||||
r.code = function (code) {
|
||||
return '<div class="codehilite"><pre>'
|
||||
+ escape(code, true)
|
||||
+ '\n</pre></div>\n\n\n';
|
||||
};
|
||||
|
||||
// Our links have title= and target=_blank
|
||||
r.link = function (href, title, text) {
|
||||
title = title || href;
|
||||
var out = '<a href="' + href + '"' + ' target="_blank" title="' +
|
||||
title + '"' + '>' + text + '</a>';
|
||||
return out;
|
||||
};
|
||||
|
||||
// Put a newline after a <br> in the generated HTML to match bugdown
|
||||
r.br = function () {
|
||||
return '<br>\n';
|
||||
};
|
||||
|
||||
function preprocess_code_blocks(src) {
|
||||
return fenced_code.process_fenced_code(src);
|
||||
}
|
||||
|
||||
// Disable ordered lists
|
||||
// We used GFM + tables, so replace the list start regex for that ruleset
|
||||
// We remove the |[\d+]\. that matches the numbering in a numbered list
|
||||
marked.Lexer.rules.tables.list = /^( *)((?:\*)) [\s\S]+?(?:\n+(?=(?: *[\-*_]){3,} *(?:\n+|$))|\n{2,}(?! )(?!\1(?:\*) )\n*|\s*$)/;
|
||||
|
||||
// Disable headings
|
||||
disable_markdown_regex(marked.Lexer.rules.tables, 'heading');
|
||||
disable_markdown_regex(marked.Lexer.rules.tables, 'lheading');
|
||||
|
||||
// Disable __strong__ (keeping **strong**)
|
||||
marked.InlineLexer.rules.zulip.strong = /^\*\*([\s\S]+?)\*\*(?!\*)/;
|
||||
|
||||
// Make sure <del> syntax matches the backend processor
|
||||
marked.InlineLexer.rules.zulip.del = /^(?!<\~)\~\~([^~]+)\~\~(?!\~)/;
|
||||
|
||||
// Disable _emphasis_ (keeping *emphasis*)
|
||||
// Text inside ** must start and end with a word character
|
||||
// it is needed for things like "const char *x = (char *)y"
|
||||
marked.InlineLexer.rules.zulip.em = /^\*(?!\s+)((?:\*\*|[\s\S])+?)((?:[\S]))\*(?!\*)/;
|
||||
|
||||
// Disable autolink as (a) it is not used in our backend and (b) it interferes with @mentions
|
||||
disable_markdown_regex(marked.InlineLexer.rules.zulip, 'autolink');
|
||||
|
||||
exports.set_realm_filters(page_params.realm_filters);
|
||||
|
||||
// Tell our fenced code preprocessor how to insert arbitrary
|
||||
// HTML into the output. This generated HTML is safe to not escape
|
||||
fenced_code.set_stash_func(function (html) {
|
||||
return marked.stashHtml(html, true);
|
||||
});
|
||||
fenced_code.set_escape_func(escape);
|
||||
|
||||
marked.setOptions({
|
||||
gfm: true,
|
||||
tables: true,
|
||||
breaks: true,
|
||||
pedantic: false,
|
||||
sanitize: true,
|
||||
smartLists: true,
|
||||
smartypants: false,
|
||||
zulip: true,
|
||||
emojiHandler: handleEmoji,
|
||||
avatarHandler: handleAvatar,
|
||||
unicodeEmojiHandler: handleUnicodeEmoji,
|
||||
streamHandler: handleStream,
|
||||
realmFilterHandler: handleRealmFilter,
|
||||
texHandler: handleTex,
|
||||
renderer: r,
|
||||
preprocessors: [preprocess_code_blocks],
|
||||
});
|
||||
|
||||
function on_failed_action(action, callback) {
|
||||
$("#main_div").on("click", "." + action + "-failed-message", function (e) {
|
||||
e.stopPropagation();
|
||||
|
|
|
@@ -0,0 +1,341 @@
|
|||
// This contains zulip's frontend markdown implementation; see
|
||||
// docs/markdown.md for docs on our Markdown syntax. The other
|
||||
// main piece in rendering markdown client-side is
|
||||
// static/third/marked/lib/marked.js, which we have significantly
|
||||
// modified from the original implementation.
|
||||
|
||||
var markdown = (function () {
|
||||
|
||||
var exports = {};
|
||||
|
||||
var realm_filter_map = {};
|
||||
var realm_filter_list = [];
|
||||
|
||||
// Regexes that match some of our common bugdown markup
|
||||
var bugdown_re = [
|
||||
// Inline image previews, check for contiguous chars ending in image suffix
|
||||
// To keep the below regexes simple, split them out for the end-of-message case
|
||||
|
||||
/[^\s]*(?:\.bmp|\.gif|\.jpg|\.jpeg|\.png|\.webp)\s+/m,
|
||||
/[^\s]*(?:\.bmp|\.gif|\.jpg|\.jpeg|\.png|\.webp)$/m,
|
||||
|
||||
// Twitter and youtube links are given previews
|
||||
|
||||
/[^\s]*(?:twitter|youtube).com\/[^\s]*/,
|
||||
];
|
||||
|
||||
exports.contains_bugdown = function (content) {
|
||||
// Try to guess whether or not a message has bugdown in it
|
||||
// If it doesn't, we can immediately render it client-side
|
||||
var markedup = _.find(bugdown_re, function (re) {
|
||||
return re.test(content);
|
||||
});
|
||||
return markedup !== undefined;
|
||||
};
|
||||
|
||||
function push_uniquely(lst, elem) {
|
||||
if (!_.contains(lst, elem)) {
|
||||
lst.push(elem);
|
||||
}
|
||||
}
|
||||
|
||||
exports.apply_markdown = function (message) {
|
||||
if (message.flags === undefined) {
|
||||
message.flags = [];
|
||||
}
|
||||
|
||||
// Our python-markdown processor appends two \n\n to input
|
||||
var options = {
|
||||
userMentionHandler: function (name) {
|
||||
var person = people.get_by_name(name);
|
||||
if (person !== undefined) {
|
||||
if (people.is_my_user_id(person.user_id)) {
|
||||
push_uniquely(message.flags, 'mentioned');
|
||||
}
|
||||
return '<span class="user-mention" data-user-id="' + person.user_id + '">' +
|
||||
'@' + person.full_name +
|
||||
'</span>';
|
||||
} else if (name === 'all' || name === 'everyone') {
|
||||
push_uniquely(message.flags, 'mentioned');
|
||||
return '<span class="user-mention" data-user-id="*">' +
|
||||
'@' + name +
|
||||
'</span>';
|
||||
}
|
||||
return undefined;
|
||||
},
|
||||
};
|
||||
message.content = marked(message.raw_content + '\n\n', options).trim();
|
||||
};
|
||||
|
||||
exports.add_message_flags = function (message) {
|
||||
// Note: mention flags are set in apply_markdown()
|
||||
|
||||
if (message.raw_content.indexOf('/me ') === 0 &&
|
||||
message.content.indexOf('<p>') === 0 &&
|
||||
message.content.lastIndexOf('</p>') === message.content.length - 4) {
|
||||
message.flags.push('is_me_message');
|
||||
}
|
||||
};
|
||||
|
||||
exports.add_subject_links = function (message) {
|
||||
if (message.type !== 'stream') {
|
||||
message.subject_links = [];
|
||||
return;
|
||||
}
|
||||
var subject = message.subject;
|
||||
var links = [];
|
||||
_.each(realm_filter_list, function (realm_filter) {
|
||||
var pattern = realm_filter[0];
|
||||
var url = realm_filter[1];
|
||||
var match;
|
||||
while ((match = pattern.exec(subject)) !== null) {
|
||||
var link_url = url;
|
||||
var matched_groups = match.slice(1);
|
||||
var i = 0;
|
||||
while (i < matched_groups.length) {
|
||||
var matched_group = matched_groups[i];
|
||||
var current_group = i + 1;
|
||||
var back_ref = "\\" + current_group;
|
||||
link_url = link_url.replace(back_ref, matched_group);
|
||||
i += 1;
|
||||
}
|
||||
links.push(link_url);
|
||||
}
|
||||
});
|
||||
message.subject_links = links;
|
||||
};
|
||||
|
||||
function escape(html, encode) {
|
||||
return html
|
||||
.replace(!encode ? /&(?!#?\w+;)/g : /&/g, '&')
|
||||
.replace(/</g, '<')
|
||||
.replace(/>/g, '>')
|
||||
.replace(/"/g, '"')
|
||||
.replace(/'/g, ''');
|
||||
}
|
||||
|
||||
function handleUnicodeEmoji(unicode_emoji) {
|
||||
var hex_value = unicode_emoji.codePointAt(0).toString(16);
|
||||
if (emoji.emojis_by_unicode.hasOwnProperty(hex_value)) {
|
||||
var emoji_url = emoji.emojis_by_unicode[hex_value];
|
||||
return '<img alt="' + unicode_emoji + '"' +
|
||||
' class="emoji" src="' + emoji_url + '"' +
|
||||
' title="' + unicode_emoji + '">';
|
||||
}
|
||||
return unicode_emoji;
|
||||
}
|
||||
|
||||
function handleEmoji(emoji_name) {
|
||||
var input_emoji = ':' + emoji_name + ":";
|
||||
var emoji_url;
|
||||
if (emoji.realm_emojis.hasOwnProperty(emoji_name)) {
|
||||
emoji_url = emoji.realm_emojis[emoji_name].emoji_url;
|
||||
return '<img alt="' + input_emoji + '"' +
|
||||
' class="emoji" src="' + emoji_url + '"' +
|
||||
' title="' + input_emoji + '">';
|
||||
} else if (emoji.emojis_by_name.hasOwnProperty(emoji_name)) {
|
||||
emoji_url = emoji.emojis_by_name[emoji_name];
|
||||
return '<img alt="' + input_emoji + '"' +
|
||||
' class="emoji" src="' + emoji_url + '"' +
|
||||
' title="' + input_emoji + '">';
|
||||
}
|
||||
return input_emoji;
|
||||
}
|
||||
|
||||
function handleAvatar(email) {
|
||||
return '<img alt="' + email + '"' +
|
||||
' class="message_body_gravatar" src="/avatar/' + email + '?s=30"' +
|
||||
' title="' + email + '">';
|
||||
}
|
||||
|
||||
function handleStream(streamName) {
|
||||
var stream = stream_data.get_sub(streamName);
|
||||
if (stream === undefined) {
|
||||
return undefined;
|
||||
}
|
||||
return '<a class="stream" data-stream-id="' + stream.stream_id + '" ' +
|
||||
'href="' + window.location.origin + '/#narrow/stream/' +
|
||||
hash_util.encodeHashComponent(stream.name) + '"' +
|
||||
'>' + '#' + stream.name + '</a>';
|
||||
|
||||
}
|
||||
|
||||
function handleRealmFilter(pattern, matches) {
|
||||
var url = realm_filter_map[pattern];
|
||||
|
||||
var current_group = 1;
|
||||
_.each(matches, function (match) {
|
||||
var back_ref = "\\" + current_group;
|
||||
url = url.replace(back_ref, match);
|
||||
current_group += 1;
|
||||
});
|
||||
|
||||
return url;
|
||||
}
|
||||
|
||||
function handleTex(tex, fullmatch) {
|
||||
try {
|
||||
return katex.renderToString(tex);
|
||||
} catch (ex) {
|
||||
if (ex.message.startsWith('KaTeX parse error')) { // TeX syntax error
|
||||
return '<span class="tex-error">' + escape(fullmatch) + '</span>';
|
||||
}
|
||||
blueslip.error(ex);
|
||||
}
|
||||
}
|
||||
|
||||
function python_to_js_filter(pattern, url) {
|
||||
// Converts a python named-group regex to a javascript-compatible numbered
|
||||
// group regex... with a regex!
|
||||
var named_group_re = /\(?P<([^>]+?)>/g;
|
||||
var match = named_group_re.exec(pattern);
|
||||
var current_group = 1;
|
||||
while (match) {
|
||||
var name = match[1];
|
||||
// Replace named group with regular matching group
|
||||
pattern = pattern.replace('(?P<' + name + '>', '(');
|
||||
// Replace named reference in url to numbered reference
|
||||
url = url.replace('%(' + name + ')s', '\\' + current_group);
|
||||
|
||||
match = named_group_re.exec(pattern);
|
||||
|
||||
current_group += 1;
|
||||
}
|
||||
// Convert any python in-regex flags to RegExp flags
|
||||
var js_flags = 'g';
|
||||
var inline_flag_re = /\(\?([iLmsux]+)\)/;
|
||||
match = inline_flag_re.exec(pattern);
|
||||
|
||||
// JS regexes only support i (case insensitivity) and m (multiline)
|
||||
// flags, so keep those and ignore the rest
|
||||
if (match) {
|
||||
var py_flags = match[1].split("");
|
||||
_.each(py_flags, function (flag) {
|
||||
if ("im".indexOf(flag) !== -1) {
|
||||
js_flags += flag;
|
||||
}
|
||||
});
|
||||
pattern = pattern.replace(inline_flag_re, "");
|
||||
}
|
||||
return [new RegExp(pattern, js_flags), url];
|
||||
}
|
||||
|
||||
exports.set_realm_filters = function (realm_filters) {
|
||||
// Update the marked parser with our particular set of realm filters
|
||||
if (!feature_flags.local_echo) {
|
||||
return;
|
||||
}
|
||||
|
||||
realm_filter_map = {};
|
||||
realm_filter_list = [];
|
||||
|
||||
var marked_rules = [];
|
||||
_.each(realm_filters, function (realm_filter) {
|
||||
var pattern = realm_filter[0];
|
||||
var url = realm_filter[1];
|
||||
var js_filters = python_to_js_filter(pattern, url);
|
||||
|
||||
realm_filter_map[js_filters[0]] = js_filters[1];
|
||||
realm_filter_list.push([js_filters[0], js_filters[1]]);
|
||||
marked_rules.push(js_filters[0]);
|
||||
});
|
||||
|
||||
marked.InlineLexer.rules.zulip.realm_filters = marked_rules;
|
||||
};
|
||||
|
||||
exports.initialize = function () {
|
||||
|
||||
function disable_markdown_regex(rules, name) {
|
||||
rules[name] = {exec: function () {
|
||||
return false;
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
// Configure the marked markdown parser for our usage
|
||||
var r = new marked.Renderer();
|
||||
|
||||
// No <code> around our code blocks instead a codehilite <div> and disable
|
||||
// class-specific highlighting.
|
||||
r.code = function (code) {
|
||||
return '<div class="codehilite"><pre>'
|
||||
+ escape(code, true)
|
||||
+ '\n</pre></div>\n\n\n';
|
||||
};
|
||||
|
||||
// Our links have title= and target=_blank
|
||||
r.link = function (href, title, text) {
|
||||
title = title || href;
|
||||
var out = '<a href="' + href + '"' + ' target="_blank" title="' +
|
||||
title + '"' + '>' + text + '</a>';
|
||||
return out;
|
||||
};
|
||||
|
||||
// Put a newline after a <br> in the generated HTML to match bugdown
|
||||
r.br = function () {
|
||||
return '<br>\n';
|
||||
};
|
||||
|
||||
function preprocess_code_blocks(src) {
|
||||
return fenced_code.process_fenced_code(src);
|
||||
}
|
||||
|
||||
// Disable ordered lists
|
||||
// We used GFM + tables, so replace the list start regex for that ruleset
|
||||
// We remove the |[\d+]\. that matches the numbering in a numbered list
|
||||
marked.Lexer.rules.tables.list = /^( *)((?:\*)) [\s\S]+?(?:\n+(?=(?: *[\-*_]){3,} *(?:\n+|$))|\n{2,}(?! )(?!\1(?:\*) )\n*|\s*$)/;
|
||||
|
||||
// Disable headings
|
||||
disable_markdown_regex(marked.Lexer.rules.tables, 'heading');
|
||||
disable_markdown_regex(marked.Lexer.rules.tables, 'lheading');
|
||||
|
||||
// Disable __strong__ (keeping **strong**)
|
||||
marked.InlineLexer.rules.zulip.strong = /^\*\*([\s\S]+?)\*\*(?!\*)/;
|
||||
|
||||
// Make sure <del> syntax matches the backend processor
|
||||
marked.InlineLexer.rules.zulip.del = /^(?!<\~)\~\~([^~]+)\~\~(?!\~)/;
|
||||
|
||||
// Disable _emphasis_ (keeping *emphasis*)
|
||||
// Text inside ** must start and end with a word character
|
||||
// it is needed for things like "const char *x = (char *)y"
|
||||
marked.InlineLexer.rules.zulip.em = /^\*(?!\s+)((?:\*\*|[\s\S])+?)((?:[\S]))\*(?!\*)/;
|
||||
|
||||
// Disable autolink as (a) it is not used in our backend and (b) it interferes with @mentions
|
||||
disable_markdown_regex(marked.InlineLexer.rules.zulip, 'autolink');
|
||||
|
||||
exports.set_realm_filters(page_params.realm_filters);
|
||||
|
||||
// Tell our fenced code preprocessor how to insert arbitrary
|
||||
// HTML into the output. This generated HTML is safe to not escape
|
||||
fenced_code.set_stash_func(function (html) {
|
||||
return marked.stashHtml(html, true);
|
||||
});
|
||||
fenced_code.set_escape_func(escape);
|
||||
|
||||
marked.setOptions({
|
||||
gfm: true,
|
||||
tables: true,
|
||||
breaks: true,
|
||||
pedantic: false,
|
||||
sanitize: true,
|
||||
smartLists: true,
|
||||
smartypants: false,
|
||||
zulip: true,
|
||||
emojiHandler: handleEmoji,
|
||||
avatarHandler: handleAvatar,
|
||||
unicodeEmojiHandler: handleUnicodeEmoji,
|
||||
streamHandler: handleStream,
|
||||
realmFilterHandler: handleRealmFilter,
|
||||
texHandler: handleTex,
|
||||
renderer: r,
|
||||
preprocessors: [preprocess_code_blocks],
|
||||
});
|
||||
|
||||
};
|
||||
|
||||
return exports;
|
||||
|
||||
}());
|
||||
if (typeof module !== 'undefined') {
|
||||
module.exports = markdown;
|
||||
}
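Realm filters arrive from the server as Python named-group regexes plus %(name)s-style URL templates; python_to_js_filter above rewrites them into numbered-group JS regexes with \1-style back references. A standalone worked example follows (re-implemented here for clarity, with the inline-flag translation omitted); the ticket pattern is an assumption consistent with the trac.zulip.net expectations in the node tests.

// Simplified re-implementation of the named-group conversion, for illustration.
function python_to_js_filter_demo(pattern, url) {
    var named_group_re = /\(\?P<([^>]+?)>/g;
    var match = named_group_re.exec(pattern);
    var current_group = 1;
    while (match) {
        var name = match[1];
        // Replace the named group with a plain capturing group and rewrite
        // the %(name)s reference in the URL as a numbered back reference.
        pattern = pattern.replace('(?P<' + name + '>', '(');
        url = url.replace('%(' + name + ')s', '\\' + current_group);
        match = named_group_re.exec(pattern);
        current_group += 1;
    }
    return [new RegExp(pattern, 'g'), url];
}

// "#(?P<id>[0-9]+)" with "https://trac.zulip.net/ticket/%(id)s" becomes
// [/#([0-9]+)/g, "https://trac.zulip.net/ticket/\1"].
console.log(python_to_js_filter_demo('#(?P<id>[0-9]+)',
                                     'https://trac.zulip.net/ticket/%(id)s'));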
@@ -139,7 +139,7 @@ function dispatch_normal_event(event) {

 case 'realm_filters':
 page_params.realm_filters = event.realm_filters;
-echo.set_realm_filters(page_params.realm_filters);
+markdown.set_realm_filters(page_params.realm_filters);
 settings_filters.populate_filters(page_params.realm_filters);
 break;

@@ -241,6 +241,7 @@ $(function () {

 // initialize other stuff
 reload.initialize();
+markdown.initialize();
 composebox_typeahead.initialize();
 search.initialize();
 tutorial.initialize();
@@ -859,6 +859,7 @@ JS_SPECS = {
 'js/reload.js',
 'js/compose_fade.js',
 'js/fenced_code.js',
+'js/markdown.js',
 'js/echo.js',
 'js/socket.js',
 'js/compose_state.js',