
There are additional steps needed to enable the "deploy host" operation
for major release upgrades. This commit adds these additional steps,
which are:

1. Create the TO release platform config directory, done during
   upgrade-start in the legacy upgrade procedure
2. Create the TO release rabbitmq directory, done by the upgrade
   manifest after the controller-1 reinstall in the legacy upgrade

The commit also fixes some issues:

1. shell-utils logging functions logged nowhere after being sourced
   by other scripts
2. the sync-controllers-feed script was syncing only the ostree_repo
   directory instead of the full feed content
3. major release deployment scripts ran from different places; now all
   scripts are executed from the TO release feed directory, or from the
   checked-out TO release ostree repo in the chroot case
4. the umount command used for chroot_mounts is changed to a lazy
   umount

Note: since the "deploy host" endpoint for major release deployment is
not yet implemented, the test plan contains test cases that simulate
the "deploy host" operation.

Test Plan
PASS: simulate "deploy host" successfully for AIO-SX
PASS: simulate "deploy host" successfully for AIO-DX

Depends-on: https://review.opendev.org/c/starlingx/config/+/913715
Story: 2010676
Task: 49703
Change-Id: Ib6ae49b3590a1e50acb305ac7482e28bcc4de403
Signed-off-by: Heitor Matsui <heitorvieira.matsui@windriver.com>
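
As a rough illustration of fix 4 above: a lazy unmount detaches the mount point immediately and cleans it up once it is no longer busy, which avoids teardown failures when files under the chroot are still in use. A minimal sketch, assuming a hypothetical helper name and mount list (not taken from the actual deploy scripts):

import subprocess

def teardown_chroot_mounts(mount_points):
    """Lazily unmount chroot bind mounts; 'umount -l' detaches even busy mounts."""
    for mount_point in reversed(mount_points):
        subprocess.run(["umount", "-l", mount_point], check=False)

# Hypothetical usage:
# teardown_chroot_mounts(["/mnt/chroot/dev", "/mnt/chroot/proc", "/mnt/chroot/sys"])
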
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

"""
Run platform upgrade prep data migration as a standalone executable
"""

import logging as LOG
import os
import shutil
import subprocess
import sys

import upgrade_utils


POSTGRES_PATH = '/var/lib/postgresql'
DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER = 'systemcontroller'
SERVICE_TYPE_IDENTITY = 'identity'
PLATFORM_PATH = "/opt/platform"
ETCD_PATH = '/opt/etcd'
RABBIT_PATH = '/var/lib/rabbitmq'
KUBERNETES_CONF_PATH = "/etc/kubernetes"
KUBERNETES_ADMIN_CONF_FILE = "admin.conf"


class DataMigration(object):

    def __init__(self, rootdir, from_release, to_release, keystone_config):
        try:
            token, endpoint = upgrade_utils.get_token_endpoint(config=keystone_config,
                                                               service_type="platform")
            _sysinv_client = upgrade_utils.get_sysinv_client(token=token,
                                                             endpoint=endpoint)

            system_attributes = _sysinv_client.isystem.list()[0]
            self.distributed_cloud_role = system_attributes.distributed_cloud_role
            self.shared_services = system_attributes.capabilities.get('shared_services', '')

        except Exception:
            LOG.exception("Failed to get host attributes from sysinv")
            raise

        postgres_dest_dir = os.path.join(POSTGRES_PATH, "upgrade")
        try:
            os.makedirs(postgres_dest_dir, 0o755, exist_ok=True)
        except OSError:
            LOG.exception("Failed to create upgrade export directory %s." %
                          postgres_dest_dir)
            raise

        self.postgres_dest_dir = postgres_dest_dir
        self.from_release = from_release
        self.to_release = to_release
        self.rootdir = rootdir

    def export_postgres(self):
        """
        Export postgres databases
        """
        devnull = open(os.devnull, 'w')
        try:
            upgrade_databases, upgrade_database_skip_tables = self._get_upgrade_databases()
            # Dump roles, table spaces and schemas for databases.
            subprocess.check_call([('sudo -u postgres pg_dumpall --clean ' +
                                    '--schema-only > %s/%s' %
                                    (self.postgres_dest_dir, 'postgres.postgreSql.config'))],
                                  shell=True, stderr=devnull)

            # Dump data for databases.
            for _a, db_elem in enumerate(upgrade_databases):

                db_cmd = 'sudo -u postgres pg_dump --format=plain --inserts '
                db_cmd += '--disable-triggers --data-only %s ' % db_elem

                for _b, table_elem in \
                        enumerate(upgrade_database_skip_tables[db_elem]):
                    db_cmd += '--exclude-table=%s ' % table_elem

                db_cmd += '> %s/%s.postgreSql.data' % (self.postgres_dest_dir, db_elem)

                subprocess.check_call([db_cmd], shell=True, stderr=devnull)
            LOG.info("Exporting postgres databases completed")
        except subprocess.CalledProcessError as cpe:
            LOG.exception("Failed to export postgres databases for upgrade.\nReturn code: %s, Error: %s.",
                          cpe.returncode, cpe.output)
            raise
        finally:
            devnull.close()
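
    # For illustration: with the default database list returned by
    # _get_upgrade_databases() below, the data dump loop above builds shell
    # commands (as a single string) of the form:
    #   sudo -u postgres pg_dump --format=plain --inserts --disable-triggers \
    #       --data-only fm --exclude-table=alarm \
    #       > /var/lib/postgresql/upgrade/fm.postgreSql.data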

    def _get_upgrade_databases(self):
        """Gets the list of databases to be upgraded
        :returns: the list of databases to be upgraded
        """

        system_role = self.distributed_cloud_role
        shared_services = self.shared_services

        UPGRADE_DATABASES = ('postgres',
                             'template1',
                             'sysinv',
                             'barbican',
                             'fm',
                             )

        UPGRADE_DATABASE_SKIP_TABLES = {'postgres': (),
                                        'template1': (),
                                        'sysinv': (),
                                        'barbican': (),
                                        'fm': ('alarm',),
                                        }

        if system_role == DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:
            UPGRADE_DATABASES += ('dcmanager', 'dcorch',)
            UPGRADE_DATABASE_SKIP_TABLES.update({
                'dcmanager': (),
                'dcorch': ('service', 'orch_job', 'orch_request',)
            })

        if SERVICE_TYPE_IDENTITY not in shared_services:
            UPGRADE_DATABASES += ('keystone',)
            UPGRADE_DATABASE_SKIP_TABLES.update({'keystone': ('token',)})

        return UPGRADE_DATABASES, UPGRADE_DATABASE_SKIP_TABLES

    def export_vim(self):
        """
        Export VIM databases
        """

        devnull = open(os.devnull, 'w')
        try:
            vim_cmd = ("nfv-vim-manage db-dump-data -d %s -f %s" %
                       (os.path.join(PLATFORM_PATH, 'nfv/vim', self.from_release),
                        os.path.join(self.postgres_dest_dir, 'vim.data')))
            subprocess.check_call([vim_cmd], shell=True, stderr=devnull)
            LOG.info("Exporting VIM completed")
        except subprocess.CalledProcessError as cpe:
            LOG.exception("Failed to export VIM databases for upgrade.\nReturn code: %s. Error: %s.",
                          cpe.returncode, cpe.output)
            raise
        finally:
            devnull.close()

    def export_etcd(self):
        """
        Create symlink to etcd directory
        """
        etcd_to_dir = os.path.join(ETCD_PATH, self.to_release)
        etcd_from_dir = os.path.join(ETCD_PATH, self.from_release)
        if not os.path.islink(etcd_to_dir) and not os.path.exists(etcd_to_dir):
            os.symlink(etcd_from_dir, etcd_to_dir)
            LOG.info("Creating symlink %s to %s completed", etcd_to_dir, etcd_from_dir)

    def copy_kubernetes_conf(self):
        """
        Copy /etc/kubernetes/admin.conf to $rootdir/usr/etc/kubernetes/
        """
        devnull = open(os.devnull, 'w')
        try:
            from_k8s_admin_file = os.path.join(KUBERNETES_CONF_PATH, KUBERNETES_ADMIN_CONF_FILE)
            to_k8s_admin_file_dir = os.path.join(self.rootdir, "usr", *KUBERNETES_CONF_PATH.split("/"))
            os.makedirs(to_k8s_admin_file_dir, exist_ok=True)
            subprocess.check_call(
                ["cp", from_k8s_admin_file, to_k8s_admin_file_dir], stdout=devnull)
            LOG.info("Copied %s to %s completed", from_k8s_admin_file, to_k8s_admin_file_dir)
        except subprocess.CalledProcessError as cpe:
            LOG.exception("Failed to copy %s.\nReturn code: %s. Error: %s.",
                          from_k8s_admin_file, cpe.returncode, cpe.output)
            raise
        finally:
            devnull.close()

    def update_branding(self):
        """
        Remove branding tar files from the release N+1 directory as branding
        files are not compatible between releases.
        """
        devnull = open(os.devnull, 'w')
        try:
            branding_files = os.path.join(
                PLATFORM_PATH, "config", self.to_release, "branding", "*.tgz")
            subprocess.check_call(["rm -f %s" % branding_files], shell=True,
                                  stdout=devnull)
            LOG.info("Removed branding files %s completed", branding_files)
        except subprocess.CalledProcessError as cpe:
            LOG.exception("Failed to remove branding files %s.\nReturn code: %s. Error: %s.",
                          branding_files, cpe.returncode, cpe.output)
            raise
        finally:
            devnull.close()

    def export_etc(self):
        """
        Export /etc directory to $rootdir/etc
        """
        src_dirs = ["platform", "sudoers.d"]
        src_files = ["passwd", "shadow", "sudoers", "resolv.conf"]
        devnull = open(os.devnull, 'w')
        try:
            # Create $rootdir/etc directory
            etc_dest_dir = os.path.join(self.rootdir, "etc")
            os.makedirs(etc_dest_dir, 0o755, exist_ok=True)

            # Copy /etc/platform and /etc/sudoers.d directories
            for src_dir in src_dirs:
                temp_src_dir = os.path.join("/etc", src_dir)
                subprocess.check_call(["cp", "-r", temp_src_dir, etc_dest_dir],
                                      stdout=devnull)
                LOG.info("Copied files in %s to %s/%s completed", temp_src_dir, etc_dest_dir, src_dir)

            # Copy /etc/passwd, /etc/shadow, /etc/sudoers, /etc/resolv.conf files
            for src_file in src_files:
                temp_src_file = os.path.join("/etc", src_file)
                temp_dest_file = os.path.join(etc_dest_dir, src_file)
                subprocess.check_call(["cp", temp_src_file, temp_dest_file],
                                      stdout=devnull)
                LOG.info("Copied %s to %s completed", temp_src_file, temp_dest_file)
        except subprocess.CalledProcessError as cpe:
            LOG.exception("Failed to copy etc files %s.\nReturn code: %s. Error: %s.",
                          src_files, cpe.returncode, cpe.output)
            raise
        except Exception as e:
            LOG.exception("Failed to export /etc directory. Error: %s.", str(e))
            raise
        finally:
            devnull.close()

    def create_platform_config(self):
        """
        Create platform config for target release
        """
        try:
            platform_config_dir = os.path.join(PLATFORM_PATH, "config")
            from_config_dir = os.path.join(platform_config_dir, self.from_release)
            to_config_dir = os.path.join(platform_config_dir, self.to_release)
            shutil.copytree(from_config_dir, to_config_dir)
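            # For example, with hypothetical release numbers 22.12 -> 24.03
            # this copies /opt/platform/config/22.12 to
            # /opt/platform/config/24.03, giving the TO release its own
            # platform config directory (step 1 in the commit message above).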
        except Exception as e:
            LOG.exception("Failed to create platform config for release %s. "
                          "Error: %s" % (self.to_release, str(e)))
            raise

    def create_rabbitmq_directory(self):
        """
        Create the target release rabbitmq directory
        """
        try:
            rabbit_dir = os.path.join(RABBIT_PATH, self.to_release, "mnesia")
            os.makedirs(rabbit_dir, exist_ok=True)
        except Exception as e:
            LOG.exception("Failed to create rabbitmq directory. Error: %s" % str(e))
            raise


def main(sys_argv):
    args = upgrade_utils.parse_arguments(sys_argv)
    try:
        rootdir = args["rootdir"]
        from_release = args["from_release"]
        to_release = args["to_release"]
    except KeyError as e:
        msg = "%s is not provided" % str(e)
        LOG.error(msg)
        print(msg)
        upgrade_utils.print_usage(sys_argv[0])
        return 1

    if rootdir is None or from_release is None or to_release is None:
        msg = "rootdir, from_release, or to_release are missing"
        LOG.error(msg)
        print(msg)
        upgrade_utils.print_usage(sys_argv[0])
        return 1

    try:
        keystone_config = upgrade_utils.get_keystone_config(args)
    except Exception:
        LOG.exception("Failed to get keystone configuration")
        return 1

    data_migration = DataMigration(rootdir, from_release, to_release, keystone_config)

    LOG.info("Running data migration preparation from %s to %s" % (from_release, to_release))

    try:
        # Export postgres databases
        data_migration.export_postgres()

        # Export VIM database
        data_migration.export_vim()

        # Point N+1 etcd to N for now. We will migrate when both controllers are
        # running N+1, during the swact back to controller-0. This solution will
        # present some problems when we do upgrade etcd, so further development
        # will be required at that time.
        data_migration.export_etcd()

        # Copy /etc/kubernetes/admin.conf to $rootdir/usr/etc/kubernetes/
        data_migration.copy_kubernetes_conf()

        # Remove branding tar files from the release N+1 directory as branding
        # files are not compatible between releases.
        data_migration.update_branding()

        # Export /etc directory to $rootdir/etc
        data_migration.export_etc()

        # Create platform config
        data_migration.create_platform_config()

        # Create rabbitmq directory
        data_migration.create_rabbitmq_directory()

        LOG.info("Data migration preparation completed successfully.")

    except Exception:
        LOG.exception("Data migration preparation failed.")
        return 1
    return 0


if __name__ == "__main__":
    upgrade_utils.configure_logging("/var/log/software.log", log_level=LOG.INFO)
    sys.exit(main(sys.argv))
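
# Usage note: upgrade_utils.parse_arguments() must provide at least the
# "rootdir", "from_release" and "to_release" keys read in main(), plus the
# keystone settings consumed by upgrade_utils.get_keystone_config().
# Illustrative invocation (argument syntax and release values are
# hypothetical; the real syntax is defined by parse_arguments):
#   <this-script> rootdir=/sysroot/upgrade from_release=22.12 to_release=24.03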