# Set of helper functions to manipulate the OpenAPI files that define our REST
# API's specification.
import os
from typing import Any, Dict, List, Optional, Set

OPENAPI_SPEC_PATH = os.path.abspath(os.path.join(
    os.path.dirname(__file__),
    '../openapi/zulip.yaml'))

# Exceptions we allow when running validate_against_openapi_schema: for each
# endpoint, method, and response code, the validator ignores the listed keys
# when they appear in the "content" being validated.
EXCLUDE_PROPERTIES = {
    '/register': {
        'post': {
            '200': ['max_message_id', 'realm_emoji'],
        },
    },
    '/zulip-outgoing-webhook': {
        'post': {
            '200': ['result', 'msg', 'message'],
        },
    },
}
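
# For instance, given the entries above, a /register response may include
# 'max_message_id' even when that key is missing from the 200 schema in
# zulip.yaml, and validate_against_openapi_schema will not flag it.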


class OpenAPISpec:
    def __init__(self, path: str) -> None:
        self.path = path
        self.last_update: Optional[float] = None
        self.data: Optional[Dict[str, Any]] = None

    def reload(self) -> None:
        # Because importing yamole (and in turn, yaml) takes
        # significant time, and we only use python-yaml for our API
        # docs, importing it lazily here is a significant optimization
        # to `manage.py` startup.
        #
        # There is a bit of a race here...we may have two processes
        # accessing this module-level object and both trying to
        # populate self.data at the same time. Hopefully this will
        # only cause some extra processing at startup and not data
        # corruption.
        from yamole import YamoleParser
        with open(self.path) as f:
            yaml_parser = YamoleParser(f)

        self.data = yaml_parser.data
        self.last_update = os.path.getmtime(self.path)

    def spec(self) -> Dict[str, Any]:
        """Reload the OpenAPI file if it has been modified after the last time
        it was read, and then return the parsed data.
        """
        last_modified = os.path.getmtime(self.path)
        # Using != rather than < to cover the corner case of users placing an
        # earlier version than the current one.
        if self.last_update != last_modified:
            self.reload()
        assert self.data is not None
        return self.data


class SchemaError(Exception):
    pass


openapi_spec = OpenAPISpec(OPENAPI_SPEC_PATH)


def get_schema(endpoint: str, method: str, response: str) -> Dict[str, Any]:
    # `response` is either a bare status code, e.g. '200', or a status code
    # plus the index of a variant in the schema's "oneOf" list, e.g. '400_0'.
    if len(response) == 3:
        schema = (openapi_spec.spec()['paths'][endpoint][method.lower()]['responses']
                  [response]['content']['application/json']['schema'])
        return schema
    else:
        resp_code = int(response[4])
        response = response[0:3]
        schema = (openapi_spec.spec()['paths'][endpoint][method.lower()]['responses']
                  [response]['content']['application/json']['schema']["oneOf"][resp_code])
        return schema
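
# A usage sketch (hypothetical endpoint; the real paths and responses are
# defined in zulip.yaml):
#
#     get_schema('/messages', 'get', '200')    # the 200 response schema
#     get_schema('/messages', 'get', '400_0')  # first "oneOf" variant of 400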


def get_openapi_fixture(endpoint: str, method: str,
                        response: Optional[str] = '200') -> Dict[str, Any]:
    """Fetch a fixture from the full spec object.
    """
    if response is None:
        response = '200'
    return get_schema(endpoint, method, response)['example']
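
# For example, get_openapi_fixture('/messages', 'get') would return the
# "example" object stored under the 200 response schema (hypothetical
# endpoint; the fixture itself lives in zulip.yaml).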


def get_openapi_description(endpoint: str, method: str) -> str:
    """Fetch a description from the full spec object.
    """
    description = openapi_spec.spec()['paths'][endpoint][method.lower()]['description']
    return description


def get_openapi_paths() -> Set[str]:
    return set(openapi_spec.spec()['paths'].keys())


def get_openapi_parameters(endpoint: str, method: str,
                           include_url_parameters: bool = True) -> List[Dict[str, Any]]:
    openapi_endpoint = openapi_spec.spec()['paths'][endpoint][method.lower()]
    # We do a `.get()` for this last bit to distinguish documented
    # endpoints with no parameters (empty list) from undocumented
    # endpoints (KeyError exception).
    parameters = openapi_endpoint.get('parameters', [])
    # Also, we skip parameters defined in the URL.
    if not include_url_parameters:
        parameters = [parameter for parameter in parameters if
                      parameter['in'] != 'path']
    return parameters
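
# For example, for a hypothetical documented endpoint '/users/{user_id}',
# get_openapi_parameters('/users/{user_id}', 'patch',
# include_url_parameters=False) would omit the 'user_id' path parameter.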


def get_openapi_return_values(endpoint: str, method: str) -> Dict[str, Any]:
    openapi_endpoint = openapi_spec.spec()['paths'][endpoint][method.lower()]
    response = openapi_endpoint['responses']['200']['content']['application/json']['schema']
    # In cases where we have used oneOf, the schemas only differ in their
    # examples, so we can choose any of them.
    if 'oneOf' in response:
        response = response['oneOf'][0]
    response = response['properties']
    return response
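
# The result maps each documented return value to its schema, typically
# something like {'result': {...}, 'msg': {...}, ...}.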


exclusion_list: List[str] = []


def validate_against_openapi_schema(content: Dict[str, Any], endpoint: str,
                                    method: str, response: str) -> None:
    """Compare a "content" dict with the defined schema for a specific method
    in an endpoint.
    """
    # Check that the response matches its code.
    if response.startswith('2') and (content.get('result', 'success').lower() != 'success'):
        raise SchemaError("Response is not 200 but is validating against 200 schema")
    global exclusion_list
    schema = get_schema(endpoint, method, response)
    # A single response schema never contains two keys with the same name,
    # so the exclusion list can safely live in a module-level global. Copy
    # the list so that appending 'code' below cannot mutate
    # EXCLUDE_PROPERTIES itself.
    exclusion_list = list(EXCLUDE_PROPERTIES.get(endpoint, {})
                          .get(method, {}).get(response, []))
    # 'code' is not declared in the schema but appears in various 400
    # responses. If it becomes common, it can be added to the 400 response
    # schema.
    if response.startswith('4'):
        exclusion_list.append('code')
    validate_object(content, schema)
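
# A minimal usage sketch (hypothetical content; the schema validated
# against comes from zulip.yaml):
#
#     content = {'result': 'success', 'msg': '', 'max_message_id': 30}
#     validate_against_openapi_schema(content, '/register', 'post', '200')
#
# This returns None on success and raises SchemaError for an extraneous
# key, a type mismatch, an opaque object, or a missing required key.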


def validate_array(content: List[Any], schema: Dict[str, Any]) -> None:
    valid_types: List[type] = []
    if 'oneOf' in schema['items']:
        for valid_type in schema['items']['oneOf']:
            valid_types.append(to_python_type(valid_type['type']))
    else:
        valid_types.append(to_python_type(schema['items']['type']))
    for item in content:
        if type(item) not in valid_types:
            raise SchemaError('Wrong data type in array')
        # We can directly check for objects and arrays as there are no
        # mixed arrays consisting of both objects and arrays.
        if dict in valid_types:
            if 'oneOf' not in schema['items']:
                validate_object(item, schema['items'])
                continue
            # If the item was not an opaque object, the continue statement
            # above should have been executed.
            if type(item) is dict:
                raise SchemaError('Opaque object in array')
        if 'items' in schema['items']:
            validate_array(item, schema['items'])


def validate_object(content: Dict[str, Any], schema: Dict[str, Any]) -> None:
    for key, value in content.items():
        if key in exclusion_list:
            continue
        # Check that the key is defined in the schema.
        if key not in schema['properties']:
            raise SchemaError('Extraneous key "{}" in the response\'s '
                              'content'.format(key))
        # Check that the types match.
        expected_type: List[type] = []
        if 'oneOf' in schema['properties'][key]:
            for types in schema['properties'][key]['oneOf']:
                expected_type.append(to_python_type(types['type']))
        else:
            expected_type.append(to_python_type(schema['properties'][key]['type']))
        actual_type = type(value)
        # The schema only defines the 'nullable' property for values that
        # can actually be null.
        if value is None and 'nullable' in schema['properties'][key]:
            continue
        if actual_type not in expected_type:
            raise SchemaError('Expected type {} for key "{}", but actually '
                              'got {}'.format(expected_type, key, actual_type))
        if actual_type is list:
            validate_array(value, schema['properties'][key])
        if 'properties' in schema['properties'][key]:
            validate_object(value, schema['properties'][key])
            continue
        if 'additionalProperties' in schema['properties'][key]:
            for child_keys in value:
                if type(value[child_keys]) is list:
                    validate_array(value[child_keys],
                                   schema['properties'][key]['additionalProperties'])
                    continue
                validate_object(value[child_keys],
                                schema['properties'][key]['additionalProperties'])
            continue
        # If the object is not opaque, one of the continue statements above
        # will have been executed, and this check is skipped.
        if actual_type is dict:
            raise SchemaError(f'Opaque object "{key}"')
    # Check that at least all the required keys are present.
    if 'required' in schema:
        for req_key in schema['required']:
            if req_key in exclusion_list:
                continue
            if req_key not in content.keys():
                raise SchemaError('Expected to find the "{}" required key'.format(req_key))


def to_python_type(py_type: str) -> type:
    """Transform an OpenAPI-like type to a Python one.

    https://swagger.io/docs/specification/data-models/data-types
    """
    TYPES = {
        'string': str,
        'number': float,
        'integer': int,
        'boolean': bool,
        'array': list,
        'object': dict,
    }
    return TYPES[py_type]
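
# For example, to_python_type('array') is list and to_python_type('object')
# is dict; an unknown OpenAPI type name raises KeyError.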