2012-10-15 22:03:50 +02:00
|
|
|
import markdown
|
2012-10-22 05:06:28 +02:00
|
|
|
import logging
|
|
|
|
import traceback
|
2012-12-04 19:57:54 +01:00
|
|
|
import urlparse
|
2012-10-25 21:38:47 +02:00
|
|
|
import re
|
2012-10-15 22:03:50 +02:00
|
|
|
|
2013-01-31 19:57:25 +01:00
|
|
|
from django.core import mail
|
|
|
|
|
2012-10-20 05:34:14 +02:00
|
|
|
from zephyr.lib.avatar import gravatar_hash
|
2012-11-19 18:31:03 +01:00
|
|
|
from zephyr.lib.bugdown import codehilite, fenced_code
|
2013-01-24 19:35:20 +01:00
|
|
|
from zephyr.lib.bugdown.fenced_code import FENCE_RE
|
2013-01-29 21:47:53 +01:00
|
|
|
from zephyr.lib.timeout import timeout
|
2012-10-17 04:42:19 +02:00
|
|
|
|
2013-03-01 19:20:53 +01:00
|
|
|
class InlineImagePreviewProcessor(markdown.treeprocessors.Treeprocessor):
    """Tree processor that appends inline image previews to a rendered
    message for image links, Dropbox share links, and YouTube videos."""

    def is_image(self, url):
        """Return True if url ends with a known image file extension."""
        # List from http://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093
        # Bug fix: ".jpeg" previously lacked its leading dot ("jpeg"), so any
        # url merely *ending* in the letters "jpeg" was treated as an image.
        for ext in [".bmp", ".gif", ".jpg", ".jpeg", ".png", ".webp"]:
            if url.lower().endswith(ext):
                return True
        return False

    def dropbox_image(self, url):
        """If url is a Dropbox share link to an image, return a direct
        download url for it (by appending ?dl=1); otherwise return None."""
        if not self.is_image(url):
            return None
        parsed_url = urlparse.urlparse(url)
        if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')) \
                and parsed_url.path.startswith('/s/'):
            # NOTE(review): this appends "?dl=1" unconditionally; a url that
            # already has a query string would get a second '?'. Dropbox /s/
            # share links normally have none, so this is left as-is.
            return "%s?dl=1" % (url,)
        return None

    def youtube_image(self, url):
        """If url is a YouTube video link, return the url of the video's
        thumbnail image; otherwise return None."""
        # Youtube video id extraction regular expression from http://pastebin.com/KyKAFv1s
        # If it matches, match.group(2) is the video id.
        youtube_re = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=)))?([0-9A-Za-z_-]+)(?(1).+)?$'
        match = re.match(youtube_re, url)
        if match is None:
            return None
        return "http://i.ytimg.com/vi/%s/default.jpg" % (match.group(2),)

    # Search the tree for <a> tags and read their href values
    def find_images(self, root):
        """Return a list of (image_url, link_url) pairs for every
        previewable <a> tag found anywhere under root."""
        images = []
        stack = [root]

        while stack:
            currElement = stack.pop()
            for child in currElement.getchildren():
                if child.getchildren():
                    stack.append(child)

                if child.tag == "a":
                    url = child.get("href")
                    # Check the more specific handlers (YouTube, Dropbox)
                    # before the generic image-extension check.
                    youtube = self.youtube_image(url)
                    if youtube is not None:
                        images.append((youtube, url))
                        continue
                    dropbox = self.dropbox_image(url)
                    if dropbox is not None:
                        images.append((dropbox, url))
                        continue
                    if self.is_image(url):
                        images.append((url, url))
                        continue
        return images

    def run(self, root):
        """Append an <a><img></a> preview to root for each previewable
        link in the message, then return the (mutated) tree."""
        image_urls = self.find_images(root)
        for (url, link) in image_urls:
            a = markdown.util.etree.SubElement(root, "a")
            a.set("href", link)
            a.set("target", "_blank")
            a.set("title", link)
            img = markdown.util.etree.SubElement(a, "img")
            img.set("src", url)
            img.set("class", "message_inline_image")

        return root
|
|
|
|
|
2012-10-17 04:42:19 +02:00
|
|
|
class Gravatar(markdown.inlinepatterns.Pattern):
    """Renders the !gravatar(email) syntax as an inline gravatar image."""

    def handleMatch(self, match):
        """Build a 30px identicon-fallback gravatar <img> for the matched email."""
        hashval = gravatar_hash(match.group('email'))
        img = markdown.util.etree.Element('img')
        img.set('class', 'message_body_gravatar img-rounded')
        img.set('src', 'https://secure.gravatar.com/avatar/%s?d=identicon&s=30'
                       % (hashval,))
        return img
|
|
|
|
|
2012-12-04 20:15:50 +01:00
|
|
|
def fixup_link(link):
    """Set certain attributes we want on every link."""
    for attr, value in (('target', '_blank'),
                        ('title', link.get('href'))):
        link.set(attr, value)
|
|
|
|
|
2013-02-01 23:15:05 +01:00
|
|
|
|
|
|
|
def sanitize_url(url):
    """
    Sanitize a url against xss attacks.
    See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.

    Returns the sanitized url, '' for an unparseable url, or None for a
    url that fails the safety checks and must not be linkified.
    """
    try:
        parts = urlparse.urlparse(url.replace(' ', '%20'))
        scheme, netloc, path, params, query, fragment = parts
    except ValueError:
        # Bad url - so bad it couldn't be parsed.
        return ''

    # Humbug modification: If scheme is not specified, assume http://
    # It's unlikely that users want relative links within humbughq.com.
    # We re-enter sanitize_url because netloc etc. need to be re-parsed.
    if scheme == '':
        return sanitize_url('http://' + url)

    # Schemes that legitimately have no network location component.
    if not netloc and scheme not in ('', 'mailto', 'news'):
        # This fails regardless of anything else, so bail out now.
        return None

    # A ':' in the path, params, query, or fragment could smuggle in a
    # scheme (e.g. javascript:) past naive renderers.
    if any(':' in component for component in parts[2:]):
        # Not a safe url
        return None

    # Url passes all tests. Return url as-is.
    return urlparse.urlunparse(parts)
|
|
|
|
|
2013-02-11 20:49:48 +01:00
|
|
|
def url_to_a(url):
    """Linkify a bare url into an <a> element.

    Email-looking text gets a mailto: href. If sanitize_url rejects the
    href, the original text is returned unchanged (rendered as plain text).
    """
    href = ('mailto:' + url) if '@' in url else url

    href = sanitize_url(href)
    if href is None:
        # Rejected by sanitize_url; render it as plain text.
        return url

    anchor = markdown.util.etree.Element('a')
    anchor.set('href', href)
    anchor.text = url
    fixup_link(anchor)
    return anchor
|
|
|
|
|
2012-10-22 02:32:18 +02:00
|
|
|
class AutoLink(markdown.inlinepatterns.Pattern):
    """Linkifies bare TLD-based urls (e.g. example.com/foo)."""

    def handleMatch(self, match):
        url = match.group('url')
        # As this will also match already-matched https?:// links,
        # don't doubly-link them
        if url.startswith(('http:', 'https:')):
            return url
        return url_to_a(url)
|
2013-02-01 23:15:05 +01:00
|
|
|
|
2013-02-11 20:49:48 +01:00
|
|
|
class HttpLink(markdown.inlinepatterns.Pattern):
    """Linkifies explicit http:// and https:// urls."""

    def handleMatch(self, match):
        return url_to_a(match.group('url'))
|
2012-10-22 02:32:18 +02:00
|
|
|
|
2012-11-02 18:25:37 +01:00
|
|
|
class UListProcessor(markdown.blockprocessors.OListProcessor):
    """ Process unordered list blocks.

    Based on markdown.blockprocessors.UListProcessor, but does not accept
    '+' or '-' as a bullet character."""

    # Emit <ul> elements (the OListProcessor parent would emit <ol>).
    TAG = 'ul'
    # A list item is up to 3 leading spaces, a literal '*', then at least
    # one space. '+' and '-' are deliberately excluded: they appear too
    # often in ordinary chat text to be safe bullet markers.
    RE = re.compile(r'^[ ]{0,3}[*][ ]+(.*)')
|
2012-11-02 18:25:37 +01:00
|
|
|
|
2013-01-24 19:35:20 +01:00
|
|
|
class BugdownUListPreprocessor(markdown.preprocessors.Preprocessor):
    """ Allows unordered list blocks that come directly after a
    paragraph to be rendered as an unordered list

    Detects paragraphs that have a matching list item that comes
    directly after a line of text, and inserts a newline between
    to satisfy Markdown"""

    # Same bullet rule as UListProcessor: '*' only, up to 3 leading spaces.
    LI_RE = re.compile(r'^[ ]{0,3}[*][ ]+(.*)', re.MULTILINE)
    # A non-empty line immediately followed by a bullet line.
    # NOTE(review): this pattern appears unused within this class — possibly
    # consumed elsewhere or vestigial; confirm before removing.
    HANGING_ULIST_RE = re.compile(r'^.+\n([ ]{0,3}[*][ ]+.*)', re.MULTILINE)

    def run(self, lines):
        """ Insert a newline between a paragraph and ulist if missing """
        inserts = 0    # blank lines added so far; offsets indices into `copy`
        fence = None   # opening fence text while inside a fenced code block
        copy = lines[:]  # we mutate the copy; scan indices refer to `lines`
        for i in xrange(len(lines) - 1):
            # Ignore anything that is inside a fenced code block
            m = FENCE_RE.match(lines[i])
            if not fence and m:
                # Entering a fenced block; remember the fence so only the
                # matching closer ends it.
                fence = m.group('fence')
            elif fence and m and fence == m.group('fence'):
                fence = None

            # If we're not in a fenced block and we detect an upcoming list
            # hanging off a paragraph, add a newline
            if not fence and lines[i] and \
                self.LI_RE.match(lines[i+1]) and not self.LI_RE.match(lines[i]):
                copy.insert(i+inserts+1, '')
                inserts += 1
        return copy
|
|
|
|
|
2012-12-04 19:57:54 +01:00
|
|
|
# Based on markdown.inlinepatterns.LinkPattern
|
|
|
|
# Based on markdown.inlinepatterns.LinkPattern
class LinkPattern(markdown.inlinepatterns.Pattern):
    """ Return a link element from the given match. """

    def handleMatch(self, m):
        # Return the original link syntax as plain text,
        # if the link fails checks.
        orig_syntax = m.group(0)

        href = m.group(9)
        if not href:
            return orig_syntax

        # Strip Markdown's optional angle-bracket wrapping: <http://...>
        if href.startswith("<"):
            href = href[1:-1]
        href = sanitize_url(self.unescape(href.strip()))
        if href is None:
            return orig_syntax

        el = markdown.util.etree.Element('a')
        el.set('href', href)
        el.text = m.group(2)
        fixup_link(el)
        return el
|
|
|
|
|
2012-10-16 17:35:58 +02:00
|
|
|
class Bugdown(markdown.Extension):
    """Humbug's Markdown dialect: strips the standard rules we don't want
    and installs our own link, list, gravatar, and image-preview handling."""

    def extendMarkdown(self, md, md_globals):
        # Reference-style link definitions are not supported.
        del md.preprocessors['reference']

        # Remove built-in inline rules we replace or deliberately disable.
        for k in ('image_link', 'image_reference', 'automail',
                  'autolink', 'link', 'reference', 'short_reference',
                  'escape', 'strong_em', 'emphasis', 'emphasis2',
                  'strong'):
            del md.inlinePatterns[k]

        # Custom bold syntax: **foo** but not __foo__
        md.inlinePatterns.add('strong',
            markdown.inlinepatterns.SimpleTagPattern(r'(\*\*)([^\n]+?)\2', 'strong'),
            '>not_strong')

        # Headers and the default list processors are disabled; our own
        # ulist processor (added below) takes over unordered lists.
        for k in ('hashheader', 'setextheader', 'olist', 'ulist'):
            del md.parser.blockprocessors[k]

        md.parser.blockprocessors.add('ulist', UListProcessor(md.parser), '>hr')

        md.inlinePatterns.add('gravatar', Gravatar(r'!gravatar\((?P<email>[^)]*)\)'), '_begin')
        md.inlinePatterns.add('link', LinkPattern(markdown.inlinepatterns.LINK_RE, md), '>backtick')

        # markdown.inlinepatterns.Pattern compiles this with re.UNICODE, which
        # is important because we're using \w.
        #
        # This rule must come after the built-in 'link' markdown linkifier to
        # avoid errors.
        http_link_regex = r'\b(?P<url>https?://[^\s]+?)(?=[^\w/]*(\s|\Z))'
        md.inlinePatterns.add('http_autolink', HttpLink(http_link_regex), '>link')

        # A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
        #
        # We detect a url by checking for the TLD, and building around it.
        #
        # To support () in urls but not match ending ) when a url is inside a parenthesis,
        # we match at maximum one set of matching parens in a url. We could extend this
        # to match two parenthetical groups, at the cost of more regex complexity.
        #
        # This rule must come after the http_autolink rule we add above to avoid double
        # linkifying.
        #
        # Bug fix: 'ca' was previously listed as '.ca'. Since these entries are
        # interpolated into a regex alternation unescaped, that leading '.'
        # matched any character, so real .ca domains never linkified.
        tlds = '|'.join(['co.uk', 'com', 'co', 'biz', 'gd', 'org', 'net', 'ly', 'edu', 'mil',
                         'gov', 'info', 'me', 'it', 'ca', 'tv', 'fm', 'io', 'gl'])
        link_regex = r"\b(?P<url>[^\s]+\.(%s)(?:/[^\s()\":]*?|([^\s()\":]*\([^\s()\":]*\)[^\s()\":]*))?)(?=([:;\?\),\.\'\"]\Z|[:;\?\),\.\'\"]\s|\Z|\s))" % (tlds,)
        md.inlinePatterns.add('autolink', AutoLink(link_regex), '>http_autolink')

        md.preprocessors.add('hanging_ulists',
                             BugdownUListPreprocessor(md),
                             "_begin")

        md.treeprocessors.add("inline_images", InlineImagePreviewProcessor(md), "_end")
|
|
|
|
|
2012-11-20 20:15:55 +01:00
|
|
|
# Module-level shared Markdown engine; convert() calls reset() on it before
# each use so state does not accumulate between messages.
_md_engine = markdown.Markdown(
    # Escape raw HTML in the input rather than passing it through.
    safe_mode = 'escape',
    output_format = 'html',
    # nl2br: treat single newlines as <br>, which matches chat expectations.
    extensions = ['nl2br',
        codehilite.makeExtension(configs=[
            ('force_linenos', False),
            ('guess_lang', False)]),
        fenced_code.makeExtension(),
        Bugdown()])
|
2012-10-15 22:03:50 +02:00
|
|
|
|
2012-10-25 21:38:47 +02:00
|
|
|
# We want to log Markdown parser failures, but shouldn't log the actual input
|
|
|
|
# message for privacy reasons. The compromise is to replace all alphanumeric
|
|
|
|
# characters with 'x'.
|
|
|
|
#
|
|
|
|
# We also use repr() to improve reproducibility, and to escape terminal control
|
|
|
|
# codes, which can do surprisingly nasty things.
|
|
|
|
_privacy_re = re.compile(r'\w', flags=re.UNICODE)
|
|
|
|
def _sanitize_for_log(md):
|
|
|
|
return repr(_privacy_re.sub('x', md))
|
|
|
|
|
2012-10-15 22:03:50 +02:00
|
|
|
def convert(md):
    """Convert Markdown to HTML, with Humbug-specific settings and hacks.

    Never raises: on any rendering failure it logs, notifies devel@ and the
    admins, and returns a canned apology paragraph instead.
    """

    # Reset the parser; otherwise it will get slower over time.
    _md_engine.reset()

    try:
        # Spend at most 5 seconds rendering.
        # Sometimes Python-Markdown is really slow; see
        # https://trac.humbughq.com/ticket/345
        html = timeout(5, _md_engine.convert, md)
    except Exception:
        # Bug fix: this was a bare "except:", which also swallowed
        # SystemExit and KeyboardInterrupt; those should propagate.
        # Imported here, not at module level — presumably to avoid a
        # circular import at load time (TODO confirm).
        from zephyr.models import Recipient
        from zephyr.lib.actions import internal_send_message

        cleaned = _sanitize_for_log(md)

        html = '<p>[Humbug note: Sorry, we could not understand the formatting of your message]</p>'

        # Output error to log as well as sending a humbug and email
        logging.getLogger('').error('Exception in Markdown parser: %sInput (sanitized) was: %s'
                                    % (traceback.format_exc(), cleaned))
        subject = "Markdown parser failure"
        internal_send_message("humbug+errors@humbughq.com",
                              Recipient.STREAM, "devel", subject,
                              "Markdown parser failed, message sent to devel@")
        mail.mail_admins(subject, "Failed message: %s\n\n%s\n\n" % (
                cleaned, traceback.format_exc()),
                         fail_silently=False)

    return html
|