2021-02-28 00:49:36 +01:00
|
|
|
import {isValid} from "date-fns";
|
2022-03-17 03:01:46 +01:00
|
|
|
import katex from "katex"; // eslint-disable-line import/no-unresolved
|
2021-02-28 00:49:36 +01:00
|
|
|
import _ from "lodash";
|
2020-08-01 03:43:15 +02:00
|
|
|
|
2021-02-28 00:49:36 +01:00
|
|
|
import * as fenced_code from "../shared/js/fenced_code";
|
|
|
|
import marked from "../third/marked/lib/marked";
|
2020-07-23 22:41:45 +02:00
|
|
|
|
2020-08-11 01:47:49 +02:00
|
|
|
// This contains zulip's frontend Markdown implementation; see
|
2017-11-08 17:55:36 +01:00
|
|
|
// docs/subsystems/markdown.md for docs on our Markdown syntax. The other
|
2020-08-11 01:47:49 +02:00
|
|
|
// main piece in rendering Markdown client-side is
|
2017-05-09 18:01:43 +02:00
|
|
|
// static/third/marked/lib/marked.js, which we have significantly
|
|
|
|
// modified from the original implementation.
|
|
|
|
|
2018-11-30 00:48:13 +01:00
|
|
|
// Docs: https://zulip.readthedocs.io/en/latest/subsystems/markdown.html
|
2017-05-09 18:01:43 +02:00
|
|
|
|
2022-03-29 18:15:25 +02:00
|
|
|
// If we see preview-related syntax in our content, we will need the
|
|
|
|
// backend to render it.
|
|
|
|
// Only used locally, via contains_preview_link() below.
const preview_regexes = [
    // Inline image previews, check for contiguous chars ending in image suffix
    // To keep the below regexes simple, split them out for the end-of-message case
    /\S*(?:\.bmp|\.gif|\.jpg|\.jpeg|\.png|\.webp)\)?\s+/m,
    /\S*(?:\.bmp|\.gif|\.jpg|\.jpeg|\.png|\.webp)\)?$/m,

    // Twitter and youtube links are given previews
    /\S*(?:twitter|youtube)\.com\/\S*/,
];
|
|
|
|
|
2022-03-29 18:15:25 +02:00
|
|
|
function contains_preview_link(content) {
    // True if the message body contains any syntax (inline image links,
    // twitter/youtube URLs) that the backend turns into a preview.
    for (const re of preview_regexes) {
        if (re.test(content)) {
            return true;
        }
    }
    return false;
}
|
|
|
|
|
2022-04-04 18:05:02 +02:00
|
|
|
export function translate_emoticons_to_names({src, get_emoticon_translations}) {
    // Translates emoticons in a string to their colon syntax.
    const terminal_symbols = ",.;?!()[] \"'\n\t"; // From composebox_typeahead
    const symbols_except_space = terminal_symbols.replace(" ", "");

    let result = src;
    let replacement_text;

    function replace_emoticon(match, g1, offset, str) {
        const char_before = str[offset - 1];
        const char_after = str[offset + match.length];

        const boundary_before = terminal_symbols.includes(char_before) || offset === 0;
        const boundary_after =
            terminal_symbols.includes(char_after) || offset === str.length - match.length;

        if (
            symbols_except_space.includes(char_before) &&
            symbols_except_space.includes(char_after)
        ) {
            // Hello!:)? — squeezed between non-space symbols; leave it alone.
            return match;
        }
        if (boundary_before && boundary_after) {
            return replacement_text;
        }
        return match;
    }

    for (const translation of get_emoticon_translations()) {
        // replace_emoticon is a callback for `replace()`, so we can't pass
        // the replacement text directly; instead we mutate the
        // `replacement_text` variable that the callback closes over.
        replacement_text = translation.replacement_text;
        result = result.replace(translation.regex, replace_emoticon);
    }

    return result;
}
|
2020-02-15 15:21:32 +01:00
|
|
|
|
2022-04-05 15:44:48 +02:00
|
|
|
function contains_problematic_linkifier({content, get_linkifier_map}) {
    // If a linkifier doesn't start with some specified characters
    // then don't render it locally. It is workaround for the fact that
    // javascript regex doesn't support lookbehind.
    const prefix = /[^\s"'(,:<]/.source;
    const suffix = /(?!\w)/.source;
    return [...get_linkifier_map().keys()].some((re) =>
        new RegExp(prefix + re.source + suffix).test(content),
    );
}
|
|
|
|
|
2022-04-05 15:44:48 +02:00
|
|
|
function content_contains_backend_only_syntax({content, get_linkifier_map}) {
    // Try to guess whether or not a message contains syntax that only the
    // backend Markdown processor can correctly handle.
    // If it doesn't, we can immediately render it client-side for local echo.
    if (contains_preview_link(content)) {
        return true;
    }
    return contains_problematic_linkifier({content, get_linkifier_map});
}
|
2017-05-09 18:01:43 +02:00
|
|
|
|
2022-03-31 15:20:43 +02:00
|
|
|
function parse_with_options({raw_content, helper_config, options}) {
    // Given the raw markdown content of a message (raw_content)
    // we return the HTML content (content) and flags.
    // Our caller passes a helper_config object that has several
    // helper functions for getting info about users, streams, etc.
    // And it also passes in options for the marked processor.

    // Mention state accumulated (as a side effect) by the handlers
    // below while marked renders the message; converted into message
    // flags at the end of this function.
    let mentioned = false;
    let mentioned_group = false;
    let mentioned_wildcard = false;

    const marked_options = {
        ...options,
        // Called by our marked fork for each user @-mention.  When
        // `silently` is true, we render with the "silent" CSS class and
        // without the leading "@", and we don't set any mention flags.
        userMentionHandler(mention, silently) {
            if (mention === "all" || mention === "everyone" || mention === "stream") {
                let classes;
                let display_text;
                if (silently) {
                    classes = "user-mention silent";
                    display_text = mention;
                } else {
                    // Wildcard mention
                    mentioned_wildcard = true;
                    display_text = "@" + mention;
                    classes = "user-mention";
                }

                // Wildcard mentions use "*" as their data-user-id.
                return `<span class="${classes}" data-user-id="*">${_.escape(display_text)}</span>`;
            }

            let full_name;
            let user_id;

            const id_regex = /^(.+)?\|(\d+)$/; // For @**user|id** and @**|id** syntax
            const match = id_regex.exec(mention);

            if (match) {
                /*
                    If we have two users named Alice, we want
                    users to provide mentions like this:

                        alice|42
                        alice|99

                    The autocomplete feature will help users
                    send correct mentions for duplicate names,
                    but we also have to consider the possibility
                    that the user will hand-type something
                    incorrectly, in which case we'll fall
                    through to the other code (which may be a
                    misfeature).
                */
                full_name = match[1];
                user_id = Number.parseInt(match[2], 10);

                if (full_name === undefined) {
                    // For @**|id** syntax
                    if (!helper_config.is_valid_user_id(user_id)) {
                        // silently ignore invalid user id.
                        user_id = undefined;
                    } else {
                        full_name = helper_config.get_actual_name_from_user_id(user_id);
                    }
                } else {
                    // For @**user|id** syntax
                    if (!helper_config.is_valid_full_name_and_user_id(full_name, user_id)) {
                        user_id = undefined;
                        full_name = undefined;
                    }
                }
            }

            if (user_id === undefined) {
                // Handle normal syntax
                full_name = mention;
                user_id = helper_config.get_user_id_from_name(full_name);
            }

            if (user_id === undefined) {
                // This is nothing to be concerned about--the users
                // are allowed to hand-type mentions and they may
                // have had a typo in the name.
                return undefined;
            }

            // HAPPY PATH! Note that we not only need to return the
            // appropriate HTML snippet here; we also want to update
            // flags on the message itself that get used by the message
            // view code and possibly our filtering code.

            // If I mention "@aLiCe sMITH", I still want "Alice Smith" to
            // show in the pill.
            let display_text = helper_config.get_actual_name_from_user_id(user_id);
            let classes;
            if (silently) {
                classes = "user-mention silent";
            } else {
                if (helper_config.my_user_id() === user_id) {
                    // Personal mention of current user.
                    mentioned = true;
                }
                classes = "user-mention";
                display_text = "@" + display_text;
            }

            return `<span class="${classes}" data-user-id="${_.escape(user_id)}">${_.escape(
                display_text,
            )}</span>`;
        },
        // Called by our marked fork for each user-group @-mention.
        groupMentionHandler(name, silently) {
            const group = helper_config.get_user_group_from_name(name);
            if (group !== undefined) {
                let display_text;
                let classes;
                if (silently) {
                    display_text = group.name;
                    classes = "user-group-mention silent";
                } else {
                    display_text = "@" + group.name;
                    classes = "user-group-mention";
                    if (
                        helper_config.is_member_of_user_group(helper_config.my_user_id(), group.id)
                    ) {
                        // Mentioned the current user's group.
                        mentioned_group = true;
                    }
                }

                return `<span class="${classes}" data-user-group-id="${_.escape(
                    group.id,
                )}">${_.escape(display_text)}</span>`;
            }
            // Unknown group: let the parser render the text unmodified.
            return undefined;
        },
        // Rewrites already-rendered mention spans inside a quoted block
        // to their silent form, and resets the mention flags.
        silencedMentionHandler(quote) {
            // Silence quoted mentions.
            quote = quote.replace(
                /(<span class="user-mention)(" data-user-id="(\d+|\*)">)@/g,
                "$1 silent$2",
            );

            // Silence quoted user group mentions.
            quote = quote.replace(
                /(<span class="user-group-mention)(" data-user-group-id="\d+">)@/g,
                "$1 silent$2",
            );

            // In most cases, if you are mentioned in the message you're
            // quoting, you wouldn't also mention yourself outside of the
            // blockquote (above it). If you do, the mention flags computed
            // here end up false; the backend rendering is authoritative and
            // the only side effect is the lack of a red flash immediately on
            // sending the message.
            //
            // A better parser would be able to just ignore mentions
            // inside; we just set all flags to False and let the
            // server rendering correct the message flags, to avoid a
            // flash of mention styling.
            mentioned = false;
            mentioned_group = false;
            mentioned_wildcard = false;
            return quote;
        },
    };

    // Our Python-Markdown processor appends two \n\n to input
    const content = marked(raw_content + "\n\n", marked_options).trim();

    // Simulate message flags for our locally rendered
    // message. Messages the user themselves sent via the browser are
    // always marked as read.
    const flags = ["read"];
    if (mentioned || mentioned_group) {
        flags.push("mentioned");
    }
    if (mentioned_wildcard) {
        flags.push("wildcard_mentioned");
    }

    return {content, flags};
}
|
2017-05-09 18:01:43 +02:00
|
|
|
|
2022-04-02 16:38:26 +02:00
|
|
|
export function get_topic_links({topic, get_linkifier_map}) {
    // We export this for testing purposes, and mobile may want to
    // use this as well in the future.
    const links = [];

    for (const [pattern, url] of get_linkifier_map().entries()) {
        let match;
        while ((match = pattern.exec(topic)) !== null) {
            let link_url = url;
            // Substitute each captured group into the URL template via
            // its \1, \2, ... back-reference.
            match.slice(1).forEach((group, i) => {
                link_url = link_url.replace("\\" + (i + 1), group);
            });
            // We store the starting index as well, to sort the order of
            // occurrence of the links in the topic, similar to the logic
            // implemented in zerver/lib/markdown/__init__.py
            links.push({url: link_url, text: match[0], index: match.index});
        }
    }

    // Also make raw URLs navigable
    const url_re = /\b(https?:\/\/[^\s<]+[^\s"'),.:;<\]])/g; // Slightly modified from third/marked.js
    let match;
    while ((match = url_re.exec(topic)) !== null) {
        links.push({url: match[0], text: match[0], index: match.index});
    }

    // Sort by position in the topic, then drop the bookkeeping index.
    links.sort((a, b) => a.index - b.index);
    for (const link of links) {
        delete link.index;
    }

    return links;
}
|
2017-05-09 18:01:43 +02:00
|
|
|
|
2021-02-28 00:49:36 +01:00
|
|
|
export function is_status_message(raw_content) {
    // A message beginning with "/me " is a status ("/me does X") message.
    return raw_content.slice(0, 4) === "/me ";
}
|
2018-01-21 19:27:36 +01:00
|
|
|
|
2019-01-16 10:11:30 +01:00
|
|
|
function make_emoji_span(codepoint, title, alt_text) {
    // Build the <span> used to render a spritesheet-based emoji;
    // everything interpolated is HTML-escaped.
    const safe_title = _.escape(title);
    const safe_codepoint = _.escape(codepoint);
    const safe_alt = _.escape(alt_text);
    return `<span aria-label="${safe_title}" class="emoji emoji-${safe_codepoint}" role="img" title="${safe_title}">${safe_alt}</span>`;
}
|
|
|
|
|
2022-03-29 16:28:28 +02:00
|
|
|
function handleUnicodeEmoji({unicode_emoji, get_emoji_name}) {
    // Render a literal Unicode emoji character via the spritesheet, or
    // pass the character through unmodified if we don't recognize it.
    const codepoint = unicode_emoji.codePointAt(0).toString(16);
    const emoji_name = get_emoji_name(codepoint);
    if (!emoji_name) {
        return unicode_emoji;
    }

    const alt_text = `:${emoji_name}:`;
    const title = emoji_name.replace(/_/g, " ");
    return make_emoji_span(codepoint, title, alt_text);
}
|
|
|
|
|
2022-03-29 16:28:28 +02:00
|
|
|
// Render :emoji_name: syntax to HTML.
//
// Zulip supports both standard/Unicode emoji, served by a
// spritesheet, and custom realm-specific emoji (served by URL).
// We first check if this is a realm emoji, and if so, render it.
// Otherwise we look at Unicode emoji to render with an emoji span
// using the spritesheet; and if it isn't one of those either, we
// pass through the plain text syntax unmodified.
function handleEmoji({emoji_name, get_realm_emoji_url, get_emoji_codepoint}) {
    const alt_text = `:${emoji_name}:`;
    const title = emoji_name.split("_").join(" ");

    // Realm (custom) emoji are served by URL and rendered as an <img>.
    const realm_url = get_realm_emoji_url(emoji_name);
    if (realm_url) {
        const src = _.escape(realm_url);
        return `<img alt="${_.escape(alt_text)}" class="emoji" src="${src}" title="${_.escape(
            title,
        )}">`;
    }

    // Unicode emoji are rendered as a spritesheet-based span.
    const codepoint = get_emoji_codepoint(emoji_name);
    if (codepoint) {
        return make_emoji_span(codepoint, title, alt_text);
    }

    // Unknown emoji name: pass the original syntax through as plain text.
    return alt_text;
}
|
|
|
|
|
2022-03-29 21:06:38 +02:00
|
|
|
// Expand a linkifier match into its target URL.
//
// `pattern` is the linkifier's regex (the key into the linkifier
// map); `matches` holds the regex's capture groups in order. The
// stored URL template references groups as \1, \2, ….
function handleLinkifier({pattern, matches, get_linkifier_map}) {
    let url = get_linkifier_map().get(pattern);

    let current_group = 1;

    for (const match of matches) {
        const back_ref = "\\" + current_group;
        // Use a replacer function so that `$` sequences (like "$&")
        // inside the matched text are inserted literally, instead of
        // being interpreted as special replacement patterns by
        // String.prototype.replace.
        url = url.replace(back_ref, () => match);
        current_group += 1;
    }

    return url;
}
|
|
|
|
|
2018-07-18 14:36:04 +02:00
|
|
|
// Render <time:...> syntax as an HTML5 <time> tag, or a plain
// <span> echoing the input when we cannot parse it.
function handleTimestamp(time) {
    // Numeric input is treated as a Unix timestamp (in seconds);
    // anything else is handed to the Date constructor as-is.
    const is_unix_timestamp = !Number.isNaN(Number(time));
    const timeobject = is_unix_timestamp
        ? // JavaScript dates are in milliseconds, Unix timestamps are in seconds
          new Date(time * 1000)
        : new Date(time);

    const escaped_time = _.escape(time);

    if (!isValid(timeobject)) {
        // Unsupported time format: rerender accordingly.
        //
        // We do not show an error on these formats in local echo because
        // there is a chance that the server would interpret it successfully
        // and if it does, the jumping from the error message to a rendered
        // timestamp doesn't look good.
        return `<span>${escaped_time}</span>`;
    }

    // Use html5 <time> tag for valid timestamps; render the time
    // without milliseconds (drop everything after the "." in the ISO
    // string and restore the trailing "Z").
    const iso_without_ms = timeobject.toISOString().split(".")[0] + "Z";
    return `<time datetime="${_.escape(iso_without_ms)}">${escaped_time}</time>`;
}
|
|
|
|
|
2022-03-31 16:13:02 +02:00
|
|
|
// Render a #stream mention as a link to the stream's narrow.
// Returns undefined when the stream name is unknown, so the caller
// can leave the original syntax untouched.
function handleStream({stream_name, get_stream_by_name, stream_hash}) {
    const stream = get_stream_by_name(stream_name);
    if (stream === undefined) {
        return undefined;
    }

    const stream_id = _.escape(stream.stream_id);
    const href = _.escape(stream_hash(stream.stream_id));
    const text = _.escape(stream.name);
    return `<a class="stream" data-stream-id="${stream_id}" href="/${href}">#${text}</a>`;
}
|
2017-05-09 18:01:43 +02:00
|
|
|
|
2022-03-31 16:13:02 +02:00
|
|
|
// Render a #stream>topic mention as a link to that topic's narrow.
// Returns undefined when the stream is unknown or the topic is
// empty, so the caller can leave the original syntax untouched.
function handleStreamTopic({stream_name, topic, get_stream_by_name, stream_topic_hash}) {
    const stream = get_stream_by_name(stream_name);
    if (stream === undefined || !topic) {
        return undefined;
    }

    const stream_id = _.escape(stream.stream_id);
    const href = _.escape(stream_topic_hash(stream.stream_id, topic));
    const text = _.escape(`#${stream.name} > ${topic}`);
    return `<a class="stream-topic" data-stream-id="${stream_id}" href="/${href}">${text}</a>`;
}
|
|
|
|
|
|
|
|
// Render TeX source to HTML with KaTeX.
//
// On a TeX syntax error we degrade gracefully by echoing the full
// original match (escaped) inside an error span; any other KaTeX
// failure is unexpected and is re-raised.
function handleTex(tex, fullmatch) {
    try {
        return katex.renderToString(tex);
    } catch (error) {
        if (error.message.startsWith("KaTeX parse error")) {
            // TeX syntax error
            return `<span class="tex-error">${_.escape(fullmatch)}</span>`;
        }
        // Attach the original error as the cause so its stack and
        // type are not lost when this is reported upstream.
        throw new Error(error.message, {cause: error});
    }
}
|
|
|
|
|
2022-04-04 17:43:21 +02:00
|
|
|
// Convert raw Markdown source to rendered output, using the helpers
// in helper_config for all app-data lookups (streams, emoji,
// linkifiers, user settings).
export function parse({raw_content, helper_config}) {
    function get_linkifier_regexes() {
        return Array.from(helper_config.get_linkifier_map().keys());
    }

    // Stub out one of marked's lexer rules with a matcher that can
    // never succeed, effectively disabling that piece of syntax.
    function disable_markdown_regex(rules, name) {
        rules[name] = {
            exec() {
                return false;
            },
        };
    }

    // Configure the marked Markdown parser for our usage
    const renderer = new marked.Renderer();

    // No <code> around our code blocks instead a codehilite <div> and disable
    // class-specific highlighting.
    renderer.code = (code) => fenced_code.wrap_code(code) + "\n\n";

    // Prohibit empty links for some reason.
    const old_link = renderer.link;
    renderer.link = (href, title, text) =>
        old_link.call(renderer, href, title, text.trim() ? text : href);

    // Put a newline after a <br> in the generated HTML to match Markdown
    renderer.br = () => "<br>\n";

    function preprocess_code_blocks(src) {
        return fenced_code.process_fenced_code(src);
    }

    function preprocess_translate_emoticons(src) {
        if (!helper_config.should_translate_emoticons()) {
            return src;
        }

        // In this scenario, the message has to be from the user, so the only
        // requirement should be that they have the setting on.
        return translate_emoticons_to_names({
            src,
            get_emoticon_translations: helper_config.get_emoticon_translations,
        });
    }

    // Disable headings
    // We only keep the # Heading format.
    disable_markdown_regex(marked.Lexer.rules.tables, "lheading");

    // Disable __strong__ (keeping **strong**)
    marked.InlineLexer.rules.zulip.strong = /^\*\*([\S\s]+?)\*\*(?!\*)/;

    // Make sure <del> syntax matches the backend processor
    marked.InlineLexer.rules.zulip.del = /^(?!<~)~~([^~]+)~~(?!~)/;

    // Disable _emphasis_ (keeping *emphasis*)
    // Text inside ** must start and end with a word character
    // to prevent mis-parsing things like "char **x = (char **)y"
    marked.InlineLexer.rules.zulip.em = /^\*(?!\s+)((?:\*\*|[\S\s])+?)(\S)\*(?!\*)/;

    // Disable autolink as (a) it is not used in our backend and (b) it interferes with @mentions
    disable_markdown_regex(marked.InlineLexer.rules.zulip, "autolink");

    // Tell our fenced code preprocessor how to insert arbitrary
    // HTML into the output. This generated HTML is safe to not escape
    fenced_code.set_stash_func((html) => marked.stashHtml(html, true));

    // Thin wrappers that close over helper_config, so the low-level
    // handle* functions don't need direct access to app data.
    const streamHandler = (stream_name) =>
        handleStream({
            stream_name,
            get_stream_by_name: helper_config.get_stream_by_name,
            stream_hash: helper_config.stream_hash,
        });

    const streamTopicHandler = (stream_name, topic) =>
        handleStreamTopic({
            stream_name,
            topic,
            get_stream_by_name: helper_config.get_stream_by_name,
            stream_topic_hash: helper_config.stream_topic_hash,
        });

    const emojiHandler = (emoji_name) =>
        handleEmoji({
            emoji_name,
            get_realm_emoji_url: helper_config.get_realm_emoji_url,
            get_emoji_codepoint: helper_config.get_emoji_codepoint,
        });

    const unicodeEmojiHandler = (unicode_emoji) =>
        handleUnicodeEmoji({
            unicode_emoji,
            get_emoji_name: helper_config.get_emoji_name,
        });

    const linkifierHandler = (pattern, matches) =>
        handleLinkifier({
            pattern,
            matches,
            get_linkifier_map: helper_config.get_linkifier_map,
        });

    const options = {
        get_linkifier_regexes,
        linkifierHandler,
        emojiHandler,
        unicodeEmojiHandler,
        streamHandler,
        streamTopicHandler,
        texHandler: handleTex,
        timestampHandler: handleTimestamp,
        gfm: true,
        tables: true,
        breaks: true,
        pedantic: false,
        sanitize: true,
        smartLists: true,
        smartypants: false,
        zulip: true,
        renderer,
        preprocessors: [preprocess_code_blocks, preprocess_translate_emoticons],
    };

    return parse_with_options({raw_content, helper_config, options});
}
|
2022-03-30 14:20:41 +02:00
|
|
|
|
2022-09-08 22:51:43 +02:00
|
|
|
// NOTE: Everything below this line is likely to be web-specific
|
2022-03-30 14:20:41 +02:00
|
|
|
// and won't be used by future platforms such as mobile.
|
|
|
|
// We may eventually move this code to a new file, but we want
|
|
|
|
// to wait till the dust settles a bit on some other changes first.
|
|
|
|
|
2022-09-08 22:51:43 +02:00
|
|
|
let web_app_helpers;
|
2022-03-30 14:20:41 +02:00
|
|
|
|
|
|
|
// Install the web app's helper configuration, used by the
// convenience wrappers below (apply_markdown, add_topic_links, ...).
export function initialize(helper_config) {
    // This is generally only intended to be called by the web app. Most
    // other platforms should call setup().
    web_app_helpers = helper_config;
}
|
|
|
|
|
|
|
|
// Render a message object in place: sets message.content,
// message.flags, and message.is_me_message from its raw_content.
export function apply_markdown(message) {
    // This is generally only intended to be called by the web app. Most
    // other platforms should call parse().
    const {raw_content} = message;
    const result = parse({raw_content, helper_config: web_app_helpers});
    message.content = result.content;
    message.flags = result.flags;
    message.is_me_message = is_status_message(raw_content);
}
|
2022-03-31 14:16:20 +02:00
|
|
|
|
2022-03-29 21:06:38 +02:00
|
|
|
// Populate message.topic_links in place with any linkifier matches
// found in the message's topic.
export function add_topic_links(message) {
    // Only stream messages have topics that can contain links.
    if (message.type === "stream") {
        message.topic_links = get_topic_links({
            topic: message.topic,
            get_linkifier_map: web_app_helpers.get_linkifier_map,
        });
    } else {
        message.topic_links = [];
    }
}
|
|
|
|
|
2022-04-05 15:44:48 +02:00
|
|
|
// Report whether `content` uses syntax (previews, linkifiers) that
// only the backend Markdown processor can render.
export function contains_backend_only_syntax(content) {
    const get_linkifier_map = web_app_helpers.get_linkifier_map;
    return content_contains_backend_only_syntax({content, get_linkifier_map});
}
|
|
|
|
|
2022-03-31 14:16:20 +02:00
|
|
|
// Render arbitrary Markdown (not an actual message) to HTML.
export function parse_non_message(raw_content) {
    // Occasionally we get markdown from the server that is not technically
    // a message, but we want to convert it to HTML. Note that we parse
    // raw_content exactly as if it were a Zulip message, so we will
    // handle things like mentions, stream links, and linkifiers.
    const {content} = parse({raw_content, helper_config: web_app_helpers});
    return content;
}
|