2016-03-13 12:50:20 +01:00
|
|
|
# Webhooks for external integrations.
|
2017-11-16 00:43:10 +01:00
|
|
|
import re
|
2019-05-09 03:40:38 +02:00
|
|
|
import string
|
2020-01-14 22:06:24 +01:00
|
|
|
from typing import Any, Callable, Dict, List, Optional
|
2016-05-25 15:02:02 +02:00
|
|
|
|
2017-11-16 00:43:10 +01:00
|
|
|
from django.db.models import Q
|
2016-06-05 23:09:32 +02:00
|
|
|
from django.http import HttpRequest, HttpResponse
|
2016-03-13 12:50:20 +01:00
|
|
|
|
2017-10-31 04:25:48 +01:00
|
|
|
from zerver.decorator import api_key_only_webhook_view
|
|
|
|
from zerver.lib.request import REQ, has_request_variables
|
2019-02-02 23:53:55 +01:00
|
|
|
from zerver.lib.response import json_success
|
2020-06-11 00:54:34 +02:00
|
|
|
from zerver.lib.webhooks.common import UnexpectedWebhookEventType, check_send_webhook_message
|
2018-12-07 00:05:57 +01:00
|
|
|
from zerver.models import Realm, UserProfile, get_user_by_delivery_email
|
2017-01-03 18:44:13 +01:00
|
|
|
|
|
|
|
# Jira event types we deliberately drop: the webhook acknowledges them
# with a 200 but sends no Zulip message (see api_jira_webhook).
IGNORED_EVENTS = [
    'issuelink_created',
    'attachment_created',
    'issuelink_deleted',
    'sprint_started',
    'sprint_closed',
    'worklog_created',
    'worklog_updated',
]
|
|
|
|
|
2018-05-10 19:34:01 +02:00
|
|
|
def guess_zulip_user_from_jira(jira_username: str, realm: Realm) -> Optional[UserProfile]:
    """Best-effort mapping from a Jira username to a Zulip user in `realm`.

    Jira only gives us the Jira username, so we guess: match an active
    user whose full name equals it (case-insensitively) or whose email
    address starts with it.  Returns None when no candidate is found.
    """
    # .first() on the id-ordered queryset picks a deterministic candidate
    # and returns None instead of raising when nothing matches, which is
    # cleaner than indexing with [0] and catching IndexError.
    return UserProfile.objects.filter(
        Q(full_name__iexact=jira_username) |
        Q(email__istartswith=jira_username),
        is_active=True,
        realm=realm,
    ).order_by("id").first()
|
|
|
|
|
2018-05-10 19:34:01 +02:00
|
|
|
def convert_jira_markup(content: str, realm: Realm) -> str:
    """Simplistic conversion of JIRA wiki markup to Zulip Markdown.

    Applies a fixed sequence of regex substitutions (bold, monospace,
    quotes, code blocks, links) and then tries to turn [~username]
    mentions into bold Zulip names when a realm is available.
    """
    substitutions = [
        # Jira uses *word* for bold, we use **word**
        (re.compile(r'\*([^\*]+)\*'), r'**\1**'),
        # Jira uses {{word}} for monospacing, we use `word`
        (re.compile(r'{{([^\*]+?)}}'), r'`\1`'),
        # Starting a line with bq. block quotes that line
        (re.compile(r'bq\. (.*)'), r'> \1'),
        # {quote}stuff{quote} wraps a block in a quote
        (re.compile(r'{quote}(.*?){quote}', re.DOTALL), r'~~~ quote\n\1\n~~~'),
        # {noformat}stuff{noformat} is a code block with no highlighting
        (re.compile(r'{noformat}(.*?){noformat}', re.DOTALL), r'~~~\n\1\n~~~'),
        # Code blocks are delineated by {code[: lang]} {code}
        (re.compile(r'{code[^\n]*}(.*?){code}', re.DOTALL), r'~~~\n\1\n~~~'),
        # Bare links [https://…]; we skip | here so titled links survive
        # for the next rule, and ~ so user mentions survive for below.
        (re.compile(r'\[([^\|~]+?)\]'), r'[\1](\1)'),
        # [Title|url] links become proper Markdown links
        (re.compile(r'\[(?:(?P<title>[^|~]+)\|)(?P<url>[^\]]*)\]'), r'[\g<title>](\g<url>)'),
    ]
    for pattern, replacement in substitutions:
        content = pattern.sub(replacement, content)

    # Try to convert a JIRA user mention of format [~username] into a
    # Zulip user mention. We don't know the email, just the JIRA username,
    # so we naively guess at their Zulip account.
    if realm:
        for jira_name in re.findall(r'\[~(.*?)\]', content):
            zulip_user = guess_zulip_user_from_jira(jira_name, realm)
            if zulip_user:
                mention = f"**{zulip_user.full_name}**"
            else:
                mention = f"**{jira_name}**"
            content = content.replace(f"[~{jira_name}]", mention)

    return content
|
|
|
|
|
2018-05-10 19:34:01 +02:00
|
|
|
def get_in(payload: Dict[str, Any], keys: List[str], default: str='') -> Any:
    """Walk nested dicts following `keys`; return `default` on any miss."""
    node = payload
    for key in keys:
        try:
            node = node[key]
        except (AttributeError, KeyError, TypeError):
            # Missing key, or a non-indexable intermediate value.
            return default
    return node
|
|
|
|
|
2019-05-03 12:55:28 +02:00
|
|
|
def get_issue_string(payload: Dict[str, Any], issue_id: Optional[str]=None, with_title: bool=False) -> str:
    """Render the issue as Markdown, linked to its /browse/ page if possible.

    The browse URL is not in the payload, so we guess it from the issue's
    REST self-URL; if that fails we fall back to plain text.
    """
    if issue_id is None:
        issue_id = get_issue_id(payload)

    text = f"{issue_id}: {get_issue_title(payload)}" if with_title else issue_id

    # We assume there is a /browse/BUG-### page rooted at the same host
    # as the issue's REST URL.
    base_url = re.match(r"(.*)\/rest\/api/.*", get_in(payload, ['issue', 'self']))
    if base_url and len(base_url.groups()):
        return f"[{text}]({base_url.group(1)}/browse/{issue_id})"
    return text
|
2016-12-31 15:57:50 +01:00
|
|
|
|
2018-05-10 19:34:01 +02:00
|
|
|
def get_assignee_mention(assignee_email: str, realm: Realm) -> str:
    """Bold mention for the assignee, or '' if there is no assignee.

    Uses the Zulip full name when the email maps to a realm user,
    otherwise falls back to the raw email address.
    """
    if assignee_email == '':
        return ''
    try:
        display_name = get_user_by_delivery_email(assignee_email, realm).full_name
    except UserProfile.DoesNotExist:
        display_name = assignee_email
    return f"**{display_name}**"
|
|
|
|
|
2018-05-10 19:34:01 +02:00
|
|
|
def get_issue_author(payload: Dict[str, Any]) -> str:
    """Display name of the acting user, or '' when absent."""
    author_path = ['user', 'displayName']
    return get_in(payload, author_path)
|
|
|
|
|
2018-05-10 19:34:01 +02:00
|
|
|
def get_issue_id(payload: Dict[str, Any]) -> str:
    """Return the issue key (e.g. "BUG-123") for this payload."""
    if 'issue' in payload:
        return get_in(payload, ['issue', 'key'])
    # Some ancient version of Jira or one of its extensions posts
    # comment_created events without an "issue" element.  The best we
    # can do is extract the Jira-internal issue number from the
    # comment's REST URL and use that in the topic.  Users who want
    # better formatting can upgrade Jira.
    return payload['comment']['self'].split('/')[-3]
|
|
|
|
|
2018-05-10 19:34:01 +02:00
|
|
|
def get_issue_title(payload: Dict[str, Any]) -> str:
    """Return the issue summary, or a placeholder for legacy payloads."""
    if 'issue' in payload:
        return get_in(payload, ['issue', 'fields', 'summary'])
    # Some ancient version of Jira or one of its extensions posts
    # comment_created events without an "issue" element, so no summary
    # is available.  Users who want better formatting can upgrade Jira.
    return 'Upgrade Jira to get the issue title here.'
|
|
|
|
|
2018-05-10 19:34:01 +02:00
|
|
|
def get_issue_subject(payload: Dict[str, Any]) -> str:
    """Topic name for the message: "<ISSUE-KEY>: <summary>"."""
    issue_id = get_issue_id(payload)
    title = get_issue_title(payload)
    return f"{issue_id}: {title}"
|
2016-12-31 15:57:50 +01:00
|
|
|
|
2018-05-10 19:34:01 +02:00
|
|
|
def get_sub_event_for_update_issue(payload: Dict[str, Any]) -> str:
    """Classify an issue_updated payload into a finer-grained sub-event.

    Prefers Jira's own issue_event_type_name; when that is missing,
    infers a comment or transition event from the payload shape.
    """
    sub_event = payload.get('issue_event_type_name', '')
    if sub_event != '':
        return sub_event
    if payload.get('comment'):
        return 'issue_commented'
    if payload.get('transition'):
        return 'issue_transited'
    return sub_event
|
|
|
|
|
2018-05-10 19:34:01 +02:00
|
|
|
def get_event_type(payload: Dict[str, Any]) -> Optional[str]:
    """webhookEvent name, treating a bare transition as an issue update."""
    event = payload.get('webhookEvent')
    # Transition-only payloads omit webhookEvent entirely.
    if event is None and payload.get('transition'):
        return 'jira:issue_updated'
    return event
|
|
|
|
|
2018-05-10 19:34:01 +02:00
|
|
|
def add_change_info(content: str, field: str, from_field: str, to_field: str) -> str:
    """Append one "* Changed <field> from … to …" bullet to `content`.

    Either side may be empty and is then omitted.  `to_field` is expected
    to arrive pre-formatted (callers bold it themselves).
    """
    pieces = [f"* Changed {field}"]
    if from_field:
        pieces.append(f" from **{from_field}**")
    if to_field:
        pieces.append(f" to {to_field}\n")
    return content + ''.join(pieces)
|
|
|
|
|
2018-05-10 19:34:01 +02:00
|
|
|
def handle_updated_issue_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
    """Render the message body for a jira:issue_updated event.

    Reassigned, commented, reopened, and resolved events are all bundled
    into this one 'updated' event type, so we try to extract the
    meaningful sub-event that happened and format accordingly.
    """
    issue_id = get_in(payload, ['issue', 'key'])
    # Linked "KEY: title" string used in every message variant below.
    issue = get_issue_string(payload, issue_id, True)

    assignee_email = get_in(payload, ['issue', 'fields', 'assignee', 'emailAddress'], '')
    assignee_mention = get_assignee_mention(assignee_email, user_profile.realm)

    if assignee_mention != '':
        assignee_blurb = f" (assigned to {assignee_mention})"
    else:
        assignee_blurb = ''

    sub_event = get_sub_event_for_update_issue(payload)
    # Comment-related sub-events (created/edited/deleted) get a quoted
    # rendering of the comment body; everything else is a change list.
    if 'comment' in sub_event:
        if sub_event == 'issue_commented':
            verb = 'commented on'
        elif sub_event == 'issue_comment_edited':
            verb = 'edited a comment on'
        else:
            verb = 'deleted a comment from'

        # comment_created payloads put the author under the comment,
        # not at the top level like issue_updated payloads do.
        if payload.get('webhookEvent') == 'comment_created':
            author = payload['comment']['author']['displayName']
        else:
            author = get_issue_author(payload)

        content = f"{author} {verb} {issue}{assignee_blurb}"
        comment = get_in(payload, ['comment', 'body'])
        if comment:
            comment = convert_jira_markup(comment, user_profile.realm)
            content = f"{content}:\n\n``` quote\n{comment}\n```"
        else:
            # A deleted comment has no body left to quote.
            content = f"{content}."
    else:
        content = f"{get_issue_author(payload)} updated {issue}{assignee_blurb}:\n\n"
        changelog = get_in(payload, ['changelog'])

        if changelog != '':
            # Use the changelog to display the changes, whitelist types we accept
            items = changelog.get('items')
            for item in items:
                field = item.get('field')

                # Prefer the pre-computed mention for assignee changes.
                if field == 'assignee' and assignee_mention != '':
                    target_field_string = assignee_mention
                else:
                    # Convert a user's target to a @-mention if possible
                    target_field_string = "**{}**".format(item.get('toString'))

                from_field_string = item.get('fromString')
                if target_field_string or from_field_string:
                    content = add_change_info(content, field, from_field_string, target_field_string)

        elif sub_event == 'issue_transited':
            # No changelog: fall back to the transition block for the
            # old/new status pair.
            from_field_string = get_in(payload, ['transition', 'from_status'])
            target_field_string = '**{}**'.format(get_in(payload, ['transition', 'to_status']))
            if target_field_string or from_field_string:
                content = add_change_info(content, 'status', from_field_string, target_field_string)

    return content
|
|
|
|
|
2019-02-17 21:10:10 +01:00
|
|
|
def handle_created_issue_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
    """Render the message body for a jira:issue_created event."""
    author = get_issue_author(payload)
    issue_string = get_issue_string(payload, with_title=True)
    priority = get_in(payload, ['issue', 'fields', 'priority', 'name'])
    assignee = get_in(payload, ['issue', 'fields', 'assignee', 'displayName'], 'no one')
    return (
        f"{author} created {issue_string}:\n"
        f"\n"
        f"* **Priority**: {priority}\n"
        f"* **Assignee**: {assignee}"
    )
|
|
|
|
|
2019-02-17 21:10:10 +01:00
|
|
|
def handle_deleted_issue_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
    """Render the message body for a jira:issue_deleted event."""
    template = "{author} deleted {issue_string}{punctuation}"
    title = get_issue_title(payload)
    # Only append a period when the title doesn't already end with
    # punctuation; guard the empty-title case, where title[-1] would
    # raise IndexError.
    punctuation = '.' if not title or title[-1] not in string.punctuation else ''
    return template.format(
        author=get_issue_author(payload),
        issue_string=get_issue_string(payload, with_title=True),
        punctuation=punctuation,
    )
|
2016-12-31 15:57:50 +01:00
|
|
|
|
2019-08-16 17:20:07 +02:00
|
|
|
def normalize_comment(comment: str) -> str:
    """Strip Jira's escaping of '!' from a comment body.

    Jira escapes special characters in its payload roughly as
    ,.?\\!\n\"'\n\\[]\\{}()\n@#$%^&*\n~`|/\\\\ — and, as of writing
    this, '!' is the one that arrives with a leading backslash, so it
    is the only one we unescape.
    """
    return comment.replace("\\!", "!")
|
|
|
|
|
|
|
|
def handle_comment_created_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
    """Render the message body for a standalone comment_created event."""
    author = payload["comment"]["author"]["displayName"]
    comment = normalize_comment(payload["comment"]["body"])
    title = get_issue_title(payload)
    return f"{author} commented on issue: *\"{title}\"*\n``` quote\n{comment}\n```\n"
|
|
|
|
|
|
|
|
def handle_comment_updated_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
    """Render the message body for a standalone comment_updated event."""
    author = payload["comment"]["author"]["displayName"]
    comment = normalize_comment(payload["comment"]["body"])
    title = get_issue_title(payload)
    return f"{author} updated their comment on issue: *\"{title}\"*\n``` quote\n{comment}\n```\n"
|
|
|
|
|
|
|
|
def handle_comment_deleted_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
    """Render the message body for a standalone comment_deleted event.

    The deleted comment body is shown struck through inside the quote.
    """
    author = payload["comment"]["author"]["displayName"]
    comment = normalize_comment(payload["comment"]["body"])
    title = get_issue_title(payload)
    return f"{author} deleted their comment on issue: *\"{title}\"*\n``` quote\n~~{comment}~~\n```\n"
|
|
|
|
|
2019-02-17 21:10:10 +01:00
|
|
|
# Dispatch table: Jira webhookEvent name -> handler that renders the
# notification body.  Events found in neither this table nor
# IGNORED_EVENTS cause api_jira_webhook to raise
# UnexpectedWebhookEventType.
JIRA_CONTENT_FUNCTION_MAPPER = {
    "jira:issue_created": handle_created_issue_event,
    "jira:issue_deleted": handle_deleted_issue_event,
    "jira:issue_updated": handle_updated_issue_event,
    "comment_created": handle_comment_created_event,
    "comment_updated": handle_comment_updated_event,
    "comment_deleted": handle_comment_deleted_event,
}
|
|
|
|
|
2019-06-15 23:40:08 +02:00
|
|
|
def get_event_handler(event: Optional[str]) -> Optional[Callable[..., str]]:
    """Look up the content handler for `event`; None if unknown/missing."""
    return None if event is None else JIRA_CONTENT_FUNCTION_MAPPER.get(event)
|
|
|
|
|
2016-12-31 15:57:50 +01:00
|
|
|
@api_key_only_webhook_view("JIRA")
@has_request_variables
def api_jira_webhook(request: HttpRequest, user_profile: UserProfile,
                     payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
    """Webhook entry point: route a Jira event to its handler and relay
    the rendered message to the configured stream/topic."""
    event = get_event_type(payload)
    # Deliberately dropped event types: acknowledge without sending.
    if event in IGNORED_EVENTS:
        return json_success()

    content_func = get_event_handler(event)

    # Unknown events are surfaced to the admin/log rather than dropped.
    if content_func is None:
        raise UnexpectedWebhookEventType('Jira', event)

    subject = get_issue_subject(payload)
    content: str = content_func(payload, user_profile)

    # unquote_url_parameters handles %-encoded stream/topic names in the
    # webhook URL.
    check_send_webhook_message(request, user_profile,
                               subject, content,
                               unquote_url_parameters=True)
    return json_success()
|