# -*- coding: utf-8 -*-
import json
import os
import re
import hashlib
import subprocess
import sys
from typing import Any, List, Set
from importlib import import_module
from io import StringIO
import glob
import time
import shutil

from django.db import connections, DEFAULT_DB_ALIAS, ProgrammingError, \
    connection
from django.db.utils import OperationalError
from django.apps import apps
from django.conf import settings
from django.core.management import call_command
from django.utils.module_loading import module_has_submodule

# Make the repository root importable, so we can import scripts.lib below.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from scripts.lib.zulip_tools import get_dev_uuid_var_path, run, \
    file_or_package_hash_updated, TEMPLATE_DATABASE_DIR

class DatabaseType:
    def __init__(self, database_name: str, settings: str, migration_status: str):
        self.database_name = database_name
        self.settings = settings
        self.migration_status = migration_status

UUID_VAR_DIR = get_dev_uuid_var_path()
FILENAME_SPLITTER = re.compile(r'[\W\-_]')

DEV_DATABASE_TYPE = DatabaseType(database_name='zulip',
                                 settings='zproject.settings',
                                 migration_status=os.path.join(UUID_VAR_DIR, "migration_status_dev"))

TEST_DATABASE_TYPE = DatabaseType(database_name='zulip_test_template',
                                  settings='zproject.test_settings',
                                  migration_status=os.path.join(UUID_VAR_DIR, 'migration_status_test'))

def run_db_migrations(platform: str) -> None:
    if platform == 'dev':
        migration_status_file = 'migration_status_dev'
        settings = 'zproject.settings'
        db_name = 'ZULIP_DB_NAME=zulip'
    elif platform == 'test':
        migration_status_file = 'migration_status_test'
        settings = 'zproject.test_settings'
        db_name = 'ZULIP_DB_NAME=zulip_test_template'
    else:
        raise ValueError("Invalid platform: %s" % (platform,))

    # We shell out to `manage.py` and pass `DJANGO_SETTINGS_MODULE` on
    # the command line rather than just calling the migration
    # functions, because Django doesn't support changing settings like
    # what the database is at runtime.
    # We also export ZULIP_DB_NAME, which is ignored by the dev
    # platform but recognized by the test platform and used to migrate
    # the correct database.
    run(['env', ('DJANGO_SETTINGS_MODULE=%s' % (settings,)), db_name,
         './manage.py', 'migrate', '--no-input'])
    run(['env', ('DJANGO_SETTINGS_MODULE=%s' % (settings,)), db_name,
         './manage.py', 'get_migration_status',
         '--output=%s' % (migration_status_file,)])
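
# For reference, for the 'test' platform the first `run` call above is
# roughly equivalent to this shell command (illustrative only):
#
#     env DJANGO_SETTINGS_MODULE=zproject.test_settings \
#         ZULIP_DB_NAME=zulip_test_template ./manage.py migrate --no-input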

def update_test_databases_if_required(use_force: bool=False,
                                      rebuild_test_database: bool=False) -> None:
    """Checks whether the zulip_test_template database template is
    consistent with our database migrations; if not, it updates it
    in the fastest way possible:

    * If all we need to do is add some migrations, just runs those
      migrations on the template database.
    * Otherwise, we rebuild the test template database from scratch.

    The default behavior is sufficient for the `test-backend` use
    case, where the test runner code will clone directly from the
    template database.

    The `rebuild_test_database` option (used by our Casper tests) asks
    us to drop and re-clone the zulip_test database from the template
    so those test suites can run with a fresh copy.

    If use_force is specified, it will always do a full rebuild.
    """
    generate_fixtures_command = ['tools/setup/generate-fixtures']
    test_template_db_status = template_database_status('test')
    if use_force or test_template_db_status == 'needs_rebuild':
        generate_fixtures_command.append('--force')
    elif test_template_db_status == 'run_migrations':
        run_db_migrations('test')
    elif not rebuild_test_database:
        return
    subprocess.check_call(generate_fixtures_command)
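
# Illustrative usage (hypothetical call sites, assuming a provisioned
# development environment):
#
#     update_test_databases_if_required()
#         # cheap no-op when the template database is already current
#     update_test_databases_if_required(rebuild_test_database=True)
#         # additionally re-clones zulip_test from the template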

def database_exists(database_name: str, **options: Any) -> bool:
    db = options.get('database', DEFAULT_DB_ALIAS)
    try:
        connection = connections[db]

        with connection.cursor() as cursor:
            # Use a parameterized query rather than string interpolation,
            # to handle unusual database names safely.
            cursor.execute("SELECT 1 from pg_database WHERE datname=%s;", [database_name])
            return_value = bool(cursor.fetchone())
        connections.close_all()
        return return_value
    except OperationalError:
        return False
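
# Illustrative example (not executed): database_exists('zulip') returns
# True iff a database named 'zulip' exists on the default connection,
# and False if the PostgreSQL server is unreachable.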

def get_migration_status(**options: Any) -> str:
    verbosity = options.get('verbosity', 1)

    for app_config in apps.get_app_configs():
        if module_has_submodule(app_config.module, "management"):
            import_module('.management', app_config.name)

    app_label = options['app_label'] if options.get('app_label') else None
    db = options.get('database', DEFAULT_DB_ALIAS)
    out = StringIO()
    command_args = ['--list']
    if app_label:
        command_args.append(app_label)

    call_command(
        'showmigrations',
        *command_args,
        database=db,
        no_color=options.get('no_color', False),
        settings=options.get('settings', os.environ['DJANGO_SETTINGS_MODULE']),
        stdout=out,
        traceback=options.get('traceback', True),
        verbosity=verbosity,
    )
    connections.close_all()
    out.seek(0)
    output = out.read()
    # Strip any ANSI color escape codes from the output.
    return re.sub(r'\x1b\[(1|0)m', '', output)

def extract_migrations_as_list(migration_status: str) -> List[str]:
    # `showmigrations --list` renders applied migrations as '[X]' and
    # unapplied ones as '[ ]'; capture the migration name either way.
    MIGRATIONS_RE = re.compile(r'\[[X ]\] (\d+_.+)\n')
    return MIGRATIONS_RE.findall(migration_status)
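
# Illustrative sketch of the input format (the app and migration names
# here are hypothetical):
#
#     status = (
#         'zerver\n'
#         ' [X] 0001_initial\n'
#         ' [ ] 0002_some_change\n'
#     )
#     extract_migrations_as_list(status)
#     # -> ['0001_initial', '0002_some_change']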

def what_to_do_with_migrations(migration_file: str, **options: Any) -> str:
    if not os.path.exists(migration_file):
        return 'scrap'

    with open(migration_file) as f:
        previous_migration_status = f.read()
    current_migration_status = get_migration_status(**options)
    all_curr_migrations = extract_migrations_as_list(current_migration_status)
    all_prev_migrations = extract_migrations_as_list(previous_migration_status)

    if len(all_curr_migrations) < len(all_prev_migrations):
        return 'scrap'

    for migration in all_prev_migrations:
        if migration not in all_curr_migrations:
            return 'scrap'

    if len(all_curr_migrations) == len(all_prev_migrations):
        return 'migrations_are_latest'

    return 'migrate'
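
# In summary: 'scrap' means the recorded migrations no longer form a
# subset of the current ones (rebuild from scratch); 'migrate' means new
# migrations were added (just run them); and 'migrations_are_latest'
# means the recorded state already matches the current migrations.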

def _get_hash_file_path(source_file_path: str, status_dir: str) -> str:
    basename = os.path.basename(source_file_path)
    filename = '_'.join(FILENAME_SPLITTER.split(basename)).lower()
    return os.path.join(status_dir, filename)
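
# For example (illustrative, with a hypothetical status directory):
#
#     _get_hash_file_path('tools/setup/postgres-init-test-db', '/tmp/status')
#     # -> '/tmp/status/postgres_init_test_db'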

def _check_hash(source_hash_file: str, target_content: str) -> bool:
    """
    Returns whether the hash of target_content matches the hash
    previously recorded in source_hash_file.  This function has a
    side effect of creating a new hash file or updating the old
    hash file.
    """
    target_hash_content = hashlib.sha1(target_content.encode('utf8')).hexdigest()

    if not os.path.exists(source_hash_file):
        source_hash_content = None
    else:
        with open(source_hash_file) as f:
            source_hash_content = f.read().strip()

    with open(source_hash_file, 'w') as f:
        f.write(target_hash_content)

    return source_hash_content == target_hash_content

def check_file_hash(target_file_path: str, status_dir: str) -> bool:
    source_hash_file = _get_hash_file_path(target_file_path, status_dir)

    with open(target_file_path) as f:
        target_content = f.read()

    return _check_hash(source_hash_file, target_content)

def check_setting_hash(setting_name: str, status_dir: str) -> bool:
    hash_filename = '_'.join(['settings', setting_name])
    source_hash_file = os.path.join(status_dir, hash_filename)

    target_content = json.dumps(getattr(settings, setting_name), sort_keys=True)

    return _check_hash(source_hash_file, target_content)
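
# Note: serializing the setting with sort_keys=True gives a
# deterministic JSON representation, so the hash only changes when the
# setting's value actually changes.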

def template_database_status(database_type: str) -> str:
    # This function returns a status string specifying the type of
    # state the template database is in and thus the kind of action required.
    if database_type == 'dev':
        database = DEV_DATABASE_TYPE
    elif database_type == 'test':
        database = TEST_DATABASE_TYPE
    else:
        raise ValueError("Invalid database_type: %s" % (database_type,))

    check_files = [
        'zilencer/management/commands/populate_db.py',
        'zerver/lib/bulk_create.py',
        'zerver/lib/generate_test_data.py',
        'zerver/lib/server_initialization.py',
        'tools/setup/postgres-init-test-db',
        'tools/setup/postgres-init-dev-db',
        'zerver/migrations/0258_enable_online_push_notifications_default.py',
    ]
    check_settings = [
        'REALM_INTERNAL_BOTS',
    ]

    # Construct a directory to store hashes named after the target database.
    status_dir = os.path.join(UUID_VAR_DIR, database.database_name + '_db_status')
    if not os.path.exists(status_dir):
        os.mkdir(status_dir)

    if database_exists(database.database_name):
        # To ensure Python evaluates all the hash tests (and thus creates
        # the hash files recording the current state), we evaluate them in
        # a list and then process the result.
        files_hash_status = all([check_file_hash(fn, status_dir) for fn in check_files])
        settings_hash_status = all([check_setting_hash(setting_name, status_dir)
                                    for setting_name in check_settings])
        hash_status = files_hash_status and settings_hash_status
        if not hash_status:
            return 'needs_rebuild'

        # Here we hash and compare our migration files before doing
        # the work of seeing what to do with them; if there are no
        # changes, we can safely assume we don't need to run
        # migrations without spending a few hundred milliseconds
        # parsing all the Python migration code.
        paths = glob.glob('*/migrations/*.py')
        check_migrations = file_or_package_hash_updated(paths, "migrations_hash_" + database.database_name,
                                                        is_force=False)
        if not check_migrations:
            return 'current'

        migration_op = what_to_do_with_migrations(database.migration_status, settings=database.settings)
        if migration_op == 'scrap':
            return 'needs_rebuild'

        if migration_op == 'migrate':
            return 'run_migrations'

        return 'current'

    return 'needs_rebuild'
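
# Possible return values, and how update_test_databases_if_required
# reacts to them: 'current' (nothing to do), 'run_migrations' (apply
# pending migrations to the template database), and 'needs_rebuild'
# (regenerate the template database from scratch).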

def destroy_leaked_test_databases(expiry_time: int = 60 * 60) -> int:
    """The logic in zerver/lib/test_runner.py tries to delete all the
    temporary test databases generated by test-backend threads, but it
    cannot guarantee it handles all race conditions correctly.  This
    is a catch-all function designed to delete any that might have
    been leaked due to crashes (etc.).  The high-level algorithm is to:

    * Delete every database with a name like zulip_test_template_*
    * Unless it is registered in a file under TEMPLATE_DATABASE_DIR as
      part of a currently running test-backend invocation
    * And that file is less than expiry_time old.

    This should ensure we ~never break a running test-backend process,
    while also ensuring we will eventually delete all leaked databases.
    """
    files = glob.glob(os.path.join(UUID_VAR_DIR, TEMPLATE_DATABASE_DIR, "*"))
    test_databases = set()  # type: Set[str]
    try:
        with connection.cursor() as cursor:
            cursor.execute("SELECT datname FROM pg_database;")
            rows = cursor.fetchall()
            for row in rows:
                if 'zulip_test_template_' in row[0]:
                    test_databases.add(row[0])
    except ProgrammingError:
        pass

    databases_in_use = set()  # type: Set[str]
    for file in files:
        if round(time.time()) - os.path.getmtime(file) < expiry_time:
            with open(file, "r") as f:
                for line in f:
                    databases_in_use.add('zulip_test_template_{}'.format(line.rstrip()))
        else:
            # Any test-backend run older than expiry_time can be
            # cleaned up, both the database and the file listing its
            # databases.
            os.remove(file)

    databases_to_drop = test_databases - databases_in_use

    if not databases_to_drop:
        return 0

    commands = "\n".join("DROP DATABASE IF EXISTS %s;" % (db,) for db in databases_to_drop)
    p = subprocess.Popen(["psql", "-q", "-v", "ON_ERROR_STOP=1", "-h", "localhost",
                          "postgres", "zulip_test"],
                         stdin=subprocess.PIPE)
    p.communicate(input=commands.encode())
    if p.returncode != 0:
        raise RuntimeError("Error cleaning up test databases!")
    return len(databases_to_drop)
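
# The psql invocation above connects to the `postgres` maintenance
# database as the `zulip_test` user; it is roughly equivalent to
# (illustrative, with a hypothetical leaked database name):
#
#     echo 'DROP DATABASE IF EXISTS zulip_test_template_1234;' | \
#         psql -q -v ON_ERROR_STOP=1 -h localhost postgres zulip_test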

def remove_test_run_directories(expiry_time: int = 60 * 60) -> int:
    removed = 0
    directories = glob.glob(os.path.join(UUID_VAR_DIR, "test-backend", "run_*"))
    for test_run in directories:
        if round(time.time()) - os.path.getmtime(test_run) > expiry_time:
            try:
                shutil.rmtree(test_run)
                removed += 1
            except FileNotFoundError:
                pass
    return removed