import re
import requests
from django.conf import settings
from django.utils.encoding import smart_text
import magic
from typing import Any, Optional, Dict, Callable
from typing.re import Match

from version import ZULIP_VERSION
from zerver.lib.cache import cache_with_key, get_cache_with_key, preview_url_cache_key
from zerver.lib.url_preview.oembed import get_oembed_data
from zerver.lib.url_preview.parsers import OpenGraphParser, GenericParser

# FIXME: Should we use a database cache or a memcached in production? What if
# opengraph data is changed for a site?
# Use an in-memory cache for development, to make it easy to develop this code
CACHE_NAME = "database" if not settings.DEVELOPMENT else "in-memory"

# Based on django.core.validators.URLValidator, with ftp support removed.
link_regex = re.compile(
    r'^(?:http)s?://'  # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
    r'(?::\d+)?'  # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)

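# A couple of sanity checks for the regex above (hypothetical examples, not
# part of the original module):
#
#   >>> bool(link_regex.match('https://example.com/some/path?q=1'))
#   True
#   >>> bool(link_regex.match('ftp://example.com/file'))  # ftp support removed
#   False
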
# FIXME: This header and timeout are not used by pyoembed when trying to autodiscover!
# Set a custom user agent, since some sites block us with the default requests header.
HEADERS = {'User-Agent': 'Zulip URL preview/%s' % (ZULIP_VERSION,)}
TIMEOUT = 15

def is_link(url: str) -> Optional[Match[str]]:
    # link_regex.match() returns None for non-matching input, hence Optional.
    return link_regex.match(smart_text(url))

def guess_mimetype_from_content(response: requests.Response) -> str:
    mime_magic = magic.Magic(mime=True)
    try:
        # Sniff only the first chunk of the body; iter_content yields bytes.
        content = next(response.iter_content(1000))
    except StopIteration:
        content = b''
    return mime_magic.from_buffer(content)

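# For example (hypothetical, not from the original module), an ordinary HTML
# page fetched with stream=True would typically sniff as 'text/html':
#
#   >>> r = requests.get('https://example.com', stream=True, timeout=TIMEOUT)
#   >>> guess_mimetype_from_content(r)
#   'text/html'
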
def valid_content_type(url: str) -> bool:
    try:
        response = requests.get(url, stream=True, headers=HEADERS, timeout=TIMEOUT)
    except requests.RequestException:
        return False

    if not response.ok:
        return False

    content_type = response.headers.get('content-type')
    # Be accommodating of bad servers: assume the content may be HTML when no
    # content-type header is present.
    if not content_type or content_type.startswith('text/html'):
        # Verify that the content is actually HTML if the server claims it is.
        content_type = guess_mimetype_from_content(response)
    return content_type.startswith('text/html')

def catch_network_errors(func: Callable[..., Any]) -> Callable[..., Any]:
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        try:
            return func(*args, **kwargs)
        except requests.exceptions.RequestException:
            # Swallow network errors; the wrapper then implicitly returns None.
            pass
    return wrapper

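# Note: because of @catch_network_errors below, a network failure while
# fetching makes get_link_embed_data() return None instead of raising.
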
@catch_network_errors
@cache_with_key(preview_url_cache_key, cache_name=CACHE_NAME, with_statsd_key="urlpreview_data")
def get_link_embed_data(url: str,
                        maxwidth: Optional[int]=640,
                        maxheight: Optional[int]=480) -> Optional[Dict[str, Any]]:
    if not is_link(url):
        return None

    if not valid_content_type(url):
        return None

    # We use two different mechanisms to get the embed data:
    # 1. OEmbed data, if found, for photo and video "type" sites.
    # 2. Otherwise, a combination of Open Graph tags and meta tags.
    data = get_oembed_data(url, maxwidth=maxwidth, maxheight=maxheight) or {}
    if data.get('oembed'):
        return data

    response = requests.get(url, stream=True, headers=HEADERS, timeout=TIMEOUT)
    if response.ok:
        og_data = OpenGraphParser(response.text).extract_data()
        for key in ['title', 'description', 'image']:
            if not data.get(key) and og_data.get(key):
                data[key] = og_data[key]

        generic_data = GenericParser(response.text).extract_data() or {}
        for key in ['title', 'description', 'image']:
            if not data.get(key) and generic_data.get(key):
                data[key] = generic_data[key]
    return data

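# A minimal usage sketch (hypothetical URL; the real callers live elsewhere in
# the codebase):
#
#   >>> data = get_link_embed_data('https://example.com/article')
#   >>> data.get('title') if data else None
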
@get_cache_with_key(preview_url_cache_key, cache_name=CACHE_NAME)
def link_embed_data_from_cache(url: str, maxwidth: Optional[int]=640, maxheight: Optional[int]=480) -> Any:
    # The body is intentionally empty: @get_cache_with_key returns the cached
    # value when present (see zerver.lib.cache for its cache-miss behavior).
    return
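
# Callers are expected to try the cache first and fall back to a full fetch,
# roughly like this (a sketch; the exception type comes from zerver.lib.cache):
#
#   >>> from zerver.lib.cache import NotFoundInCache
#   >>> try:
#   ...     data = link_embed_data_from_cache(url)
#   ... except NotFoundInCache:
#   ...     data = get_link_embed_data(url)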