Apply black formatter to dcmanager/orchestrator
This commit applies Black formatting to the `dcmanager/orchestrator` files
to ensure that they adhere to the Black code style guidelines.

Test Plan:
PASS: Success in stx-distcloud-tox-black

Story: 2011149
Task: 50444

Change-Id: I89cd3c661eb783468fa486e685c7f2aec6a56f0f
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
parent 9c91fffa07
commit 30df79b36a
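The reformatting is mechanical and can be reproduced with Black itself. The
snippet below is an illustrative sketch (the sample source and the
line_length=88 setting are assumptions, not taken from this change; the gate
job stx-distcloud-tox-black runs the project's actual check). It shows the two
transformations that dominate the diff: backslash-continued imports become
parenthesized imports with a trailing comma, and quotes are normalized to
double quotes.

    # Requires the "black" package (pip install black).
    import black

    OLD = (
        "from dcmanager.orchestrator.states.firmware.importing_firmware \\\n"
        "    import ImportingFirmwareState\n"
        "endpoint = ks_client.endpoint_cache.get_endpoint('sysinv')\n"
    )

    # black.format_str() applies the same rules as the command-line tool.
    print(black.format_str(OLD, mode=black.Mode(line_length=88)))
    # from dcmanager.orchestrator.states.firmware.importing_firmware import (
    #     ImportingFirmwareState,
    # )
    # endpoint = ks_client.endpoint_cache.get_endpoint("sysinv")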
@@ -1,5 +1,5 @@
 # Copyright 2017 Ericsson AB.
-# Copyright (c) 2017-2021 Wind River Systems, Inc.
+# Copyright (c) 2017-2021, 2024 Wind River Systems, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,14 +17,18 @@
 from dccommon.drivers.openstack import vim
 from dcmanager.common import consts
 from dcmanager.orchestrator.orch_thread import OrchThread
-from dcmanager.orchestrator.states.firmware.applying_vim_strategy \
-    import ApplyingVIMStrategyState
-from dcmanager.orchestrator.states.firmware.creating_vim_strategy \
-    import CreatingVIMStrategyState
-from dcmanager.orchestrator.states.firmware.finishing_fw_update \
-    import FinishingFwUpdateState
-from dcmanager.orchestrator.states.firmware.importing_firmware \
-    import ImportingFirmwareState
+from dcmanager.orchestrator.states.firmware.applying_vim_strategy import (
+    ApplyingVIMStrategyState,
+)
+from dcmanager.orchestrator.states.firmware.creating_vim_strategy import (
+    CreatingVIMStrategyState,
+)
+from dcmanager.orchestrator.states.firmware.finishing_fw_update import (
+    FinishingFwUpdateState,
+)
+from dcmanager.orchestrator.states.firmware.importing_firmware import (
+    ImportingFirmwareState,
+)
 
 
 class FwUpdateOrchThread(OrchThread):
@@ -43,16 +47,13 @@ class FwUpdateOrchThread(OrchThread):
     so, it executes the strategy, updating the strategy and steps in the
     database as it goes, with state and progress information.
     """
 
     # every state in fw orchestration must have an operator
     STATE_OPERATORS = {
-        consts.STRATEGY_STATE_IMPORTING_FIRMWARE:
-            ImportingFirmwareState,
-        consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY:
-            CreatingVIMStrategyState,
-        consts.STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY:
-            ApplyingVIMStrategyState,
-        consts.STRATEGY_STATE_FINISHING_FW_UPDATE:
-            FinishingFwUpdateState,
+        consts.STRATEGY_STATE_IMPORTING_FIRMWARE: ImportingFirmwareState,
+        consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY: CreatingVIMStrategyState,
+        consts.STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY: ApplyingVIMStrategyState,
+        consts.STRATEGY_STATE_FINISHING_FW_UPDATE: FinishingFwUpdateState,
     }
 
     def __init__(self, strategy_lock, audit_rpc_client):
@@ -61,7 +62,8 @@ class FwUpdateOrchThread(OrchThread):
             audit_rpc_client,
             consts.SW_UPDATE_TYPE_FIRMWARE,
             vim.STRATEGY_NAME_FW_UPDATE,
-            consts.STRATEGY_STATE_IMPORTING_FIRMWARE)
+            consts.STRATEGY_STATE_IMPORTING_FIRMWARE,
+        )
 
     def trigger_audit(self):
         """Trigger an audit for firmware"""
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2020-2021 Wind River Systems, Inc.
+# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -7,31 +7,39 @@ from dccommon.drivers.openstack import vim
 from dcmanager.common import consts
 from dcmanager.orchestrator.orch_thread import OrchThread
 
-from dcmanager.orchestrator.states.kube_rootca.applying_vim_strategy \
-    import ApplyingVIMKubeRootcaUpdateStrategyState
-from dcmanager.orchestrator.states.kube_rootca.creating_vim_strategy \
-    import CreatingVIMKubeRootcaUpdateStrategyState
-from dcmanager.orchestrator.states.kube_rootca.pre_check \
-    import KubeRootcaUpdatePreCheckState
-from dcmanager.orchestrator.states.kube_rootca.start_update \
-    import KubeRootcaUpdateStartState
-from dcmanager.orchestrator.states.kube_rootca.upload_cert \
-    import KubeRootcaUpdateUploadCertState
+from dcmanager.orchestrator.states.kube_rootca.applying_vim_strategy import (
+    ApplyingVIMKubeRootcaUpdateStrategyState,
+)
+from dcmanager.orchestrator.states.kube_rootca.creating_vim_strategy import (
+    CreatingVIMKubeRootcaUpdateStrategyState,
+)
+from dcmanager.orchestrator.states.kube_rootca.pre_check import (
+    KubeRootcaUpdatePreCheckState,
+)
+from dcmanager.orchestrator.states.kube_rootca.start_update import (
+    KubeRootcaUpdateStartState,
+)
+from dcmanager.orchestrator.states.kube_rootca.upload_cert import (
+    KubeRootcaUpdateUploadCertState,
+)
 
 
 class KubeRootcaUpdateOrchThread(OrchThread):
     """Kube RootCA Update Orchestration Thread"""
 
+    # Reassign constants to avoid line length issues
+    PRE_CHECK = consts.STRATEGY_STATE_KUBE_ROOTCA_UPDATE_PRE_CHECK
+    START = consts.STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START
+    UPLOAD_CERT = consts.STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT
+    CREATE_VIM_STRATEGY = consts.STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
+    APPLY_VIM_STRATEGY = consts.STRATEGY_STATE_APPLYING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
+
     STATE_OPERATORS = {
-        consts.STRATEGY_STATE_KUBE_ROOTCA_UPDATE_PRE_CHECK:
-            KubeRootcaUpdatePreCheckState,
-        consts.STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START:
-            KubeRootcaUpdateStartState,
-        consts.STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT:
-            KubeRootcaUpdateUploadCertState,
-        consts.STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY:
-            CreatingVIMKubeRootcaUpdateStrategyState,
-        consts.STRATEGY_STATE_APPLYING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY:
-            ApplyingVIMKubeRootcaUpdateStrategyState,
+        PRE_CHECK: KubeRootcaUpdatePreCheckState,
+        START: KubeRootcaUpdateStartState,
+        UPLOAD_CERT: KubeRootcaUpdateUploadCertState,
+        CREATE_VIM_STRATEGY: CreatingVIMKubeRootcaUpdateStrategyState,
+        APPLY_VIM_STRATEGY: ApplyingVIMKubeRootcaUpdateStrategyState,
     }
 
     def __init__(self, strategy_lock, audit_rpc_client):
@@ -40,7 +48,8 @@ class KubeRootcaUpdateOrchThread(OrchThread):
             audit_rpc_client,
             consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE,
             vim.STRATEGY_NAME_KUBE_ROOTCA_UPDATE,
-            consts.STRATEGY_STATE_KUBE_ROOTCA_UPDATE_PRE_CHECK)
+            consts.STRATEGY_STATE_KUBE_ROOTCA_UPDATE_PRE_CHECK,
+        )
 
     def trigger_audit(self):
         """Trigger an audit for kube rootca update"""
@@ -16,28 +16,31 @@
 #
 from dccommon.drivers.openstack import vim
 from dcmanager.common import consts
-from dcmanager.orchestrator.cache.shared_cache_repository import \
-    SharedCacheRepository
+from dcmanager.orchestrator.cache.shared_cache_repository import SharedCacheRepository
 from dcmanager.orchestrator.orch_thread import OrchThread
-from dcmanager.orchestrator.states.kube.applying_vim_kube_upgrade_strategy \
-    import ApplyingVIMKubeUpgradeStrategyState
-from dcmanager.orchestrator.states.kube.creating_vim_kube_upgrade_strategy \
-    import CreatingVIMKubeUpgradeStrategyState
-from dcmanager.orchestrator.states.kube.pre_check \
-    import KubeUpgradePreCheckState
+from dcmanager.orchestrator.states.kube.applying_vim_kube_upgrade_strategy import (
+    ApplyingVIMKubeUpgradeStrategyState,
+)
+from dcmanager.orchestrator.states.kube.creating_vim_kube_upgrade_strategy import (
+    CreatingVIMKubeUpgradeStrategyState,
+)
+from dcmanager.orchestrator.states.kube.pre_check import KubeUpgradePreCheckState
 
 
 class KubeUpgradeOrchThread(OrchThread):
     """Kube Upgrade Orchestration Thread"""
 
+    # Reassign constants to avoid line length issues
+    PRE_CHECK = consts.STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK
+    CREATE_VIM_STRATEGY = consts.STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
+    APPLY_VIM_STRATEGY = consts.STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY
+
     # every state in kube orchestration must have an operator
     # The states are listed here in their typical execution order
     STATE_OPERATORS = {
-        consts.STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK:
-            KubeUpgradePreCheckState,
-        consts.STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY:
-            CreatingVIMKubeUpgradeStrategyState,
-        consts.STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY:
-            ApplyingVIMKubeUpgradeStrategyState,
+        PRE_CHECK: KubeUpgradePreCheckState,
+        CREATE_VIM_STRATEGY: CreatingVIMKubeUpgradeStrategyState,
+        APPLY_VIM_STRATEGY: ApplyingVIMKubeUpgradeStrategyState,
     }
 
     def __init__(self, strategy_lock, audit_rpc_client):
@@ -46,7 +49,8 @@ class KubeUpgradeOrchThread(OrchThread):
             audit_rpc_client,
             consts.SW_UPDATE_TYPE_KUBERNETES,
             vim.STRATEGY_NAME_KUBE_UPGRADE,
-            consts.STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK)
+            consts.STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK,
+        )
 
         # Initialize shared cache instances for the states that require them
        self._shared_caches = SharedCacheRepository(self.update_type)
@@ -55,15 +55,18 @@ class OrchThread(threading.Thread):
     so, it executes the strategy, updating the strategy and steps in the
     database as it goes, with state and progress information.
     """
 
     # each subclass must provide the STATE_OPERATORS
     STATE_OPERATORS = {}
 
-    def __init__(self,
-                 strategy_lock,
-                 audit_rpc_client,
-                 update_type,
-                 vim_strategy_name,
-                 starting_state):
+    def __init__(
+        self,
+        strategy_lock,
+        audit_rpc_client,
+        update_type,
+        vim_strategy_name,
+        starting_state,
+    ):
         super(OrchThread, self).__init__()
         # Used to protect strategy when an atomic read/update is required.
         self.strategy_lock = strategy_lock
@@ -79,8 +82,7 @@ class OrchThread(threading.Thread):
         self.context = context.get_admin_context()
         self._stop = threading.Event()
         # Keeps track of greenthreads we create to do work.
-        self.thread_group_manager = scheduler.ThreadGroupManager(
-            thread_pool_size=500)
+        self.thread_group_manager = scheduler.ThreadGroupManager(thread_pool_size=500)
         # Track worker created for each subcloud.
         self.subcloud_workers = dict()
         # Track if the strategy setup function was executed
@@ -89,8 +91,9 @@ class OrchThread(threading.Thread):
     @abc.abstractmethod
     def trigger_audit(self):
         """Subclass MUST override this method"""
-        LOG.warn("(%s) OrchThread subclass must override trigger_audit"
-                 % self.update_type)
+        LOG.warn(
+            "(%s) OrchThread subclass must override trigger_audit" % self.update_type
+        )
 
     def _pre_apply_setup(self):
         """Setup performed once before a strategy starts to apply"""
@@ -149,16 +152,17 @@ class OrchThread(threading.Thread):
     @staticmethod
     def get_sysinv_client(region_name=dccommon_consts.DEFAULT_REGION_NAME):
         ks_client = OrchThread.get_ks_client(region_name)
-        endpoint = ks_client.endpoint_cache.get_endpoint('sysinv')
-        return SysinvClient(region_name,
-                            ks_client.session,
-                            endpoint=endpoint)
+        endpoint = ks_client.endpoint_cache.get_endpoint("sysinv")
+        return SysinvClient(region_name, ks_client.session, endpoint=endpoint)
 
     @staticmethod
     def get_software_client(region_name=dccommon_consts.DEFAULT_REGION_NAME):
         ks_client = OrchThread.get_ks_client(region_name)
-        return SoftwareClient(region_name, ks_client.session,
-                              endpoint=ks_client.endpoint_cache.get_endpoint('usm'))
+        return SoftwareClient(
+            region_name,
+            ks_client.session,
+            endpoint=ks_client.endpoint_cache.get_endpoint("usm"),
+        )
 
     @staticmethod
     def get_patching_client(region_name=dccommon_consts.DEFAULT_REGION_NAME):
@@ -191,18 +195,16 @@ class OrchThread(threading.Thread):
             details = str(info)
         # details cannot exceed 255 chars. truncate and add '..'
         if len(details) > 255:
-            details = details[:253] + '..'
+            details = details[:253] + ".."
         return details
 
     def determine_state_operator(self, strategy_step):
         """Return the state operator for the current state"""
         state_operator = self.STATE_OPERATORS.get(strategy_step.state)
         # instantiate and return the state_operator class
-        return state_operator(
-            region_name=OrchThread.get_region_name(strategy_step))
+        return state_operator(region_name=OrchThread.get_region_name(strategy_step))
 
-    def strategy_step_update(
-            self, subcloud_id, state=None, details=None, stage=None):
+    def strategy_step_update(self, subcloud_id, state=None, details=None, stage=None):
         """Update the strategy step in the DB
 
         Sets the start and finished timestamp if necessary, based on state.
@@ -211,24 +213,29 @@ class OrchThread(threading.Thread):
         finished_at = None
         if state == self.starting_state:
             started_at = datetime.datetime.now()
-        elif state in [consts.STRATEGY_STATE_COMPLETE,
-                       consts.STRATEGY_STATE_ABORTED,
-                       consts.STRATEGY_STATE_FAILED]:
+        elif state in [
+            consts.STRATEGY_STATE_COMPLETE,
+            consts.STRATEGY_STATE_ABORTED,
+            consts.STRATEGY_STATE_FAILED,
+        ]:
             finished_at = datetime.datetime.now()
         # Return the updated object, in case we need to use its updated values
-        return db_api.strategy_step_update(self.context,
-                                           subcloud_id,
-                                           stage=stage,
-                                           state=state,
-                                           details=details,
-                                           started_at=started_at,
-                                           finished_at=finished_at)
+        return db_api.strategy_step_update(
+            self.context,
+            subcloud_id,
+            stage=stage,
+            state=state,
+            details=details,
+            started_at=started_at,
+            finished_at=finished_at,
+        )
 
     def _delete_subcloud_worker(self, region, subcloud_id):
         db_api.strategy_step_update(
             self.context,
             subcloud_id,
-            stage=consts.STAGE_SUBCLOUD_ORCHESTRATION_PROCESSED)
+            stage=consts.STAGE_SUBCLOUD_ORCHESTRATION_PROCESSED,
+        )
         if region in self.subcloud_workers:
             # The orchestration for this subcloud has either
             # completed/failed/aborted, remove it from the
@@ -239,23 +246,25 @@ class OrchThread(threading.Thread):
     def run_orch(self):
         while not self.stopped():
             try:
-                LOG.debug('(%s) OrchThread Running' % self.update_type)
+                LOG.debug("(%s) OrchThread Running" % self.update_type)
 
                 sw_update_strategy = db_api.sw_update_strategy_get(
-                    self.context,
-                    update_type=self.update_type)
+                    self.context, update_type=self.update_type
+                )
 
                 if sw_update_strategy.type == self.update_type:
                     if sw_update_strategy.state in [
-                            consts.SW_UPDATE_STATE_APPLYING,
-                            consts.SW_UPDATE_STATE_ABORTING]:
+                        consts.SW_UPDATE_STATE_APPLYING,
+                        consts.SW_UPDATE_STATE_ABORTING,
+                    ]:
                         self._pre_apply_setup()
                         self.apply(sw_update_strategy)
-                    elif sw_update_strategy.state == \
-                            consts.SW_UPDATE_STATE_ABORT_REQUESTED:
+                    elif (
+                        sw_update_strategy.state
+                        == consts.SW_UPDATE_STATE_ABORT_REQUESTED
+                    ):
                         self.abort(sw_update_strategy)
-                    elif sw_update_strategy.state == \
-                            consts.SW_UPDATE_STATE_DELETING:
+                    elif sw_update_strategy.state == consts.SW_UPDATE_STATE_DELETING:
                         self.delete(sw_update_strategy)
                         self._post_delete_teardown()
@@ -265,8 +274,7 @@ class OrchThread(threading.Thread):
 
             except Exception:
                 # We catch all exceptions to avoid terminating the thread.
-                LOG.exception("(%s) OrchThread unexpected exception"
-                              % self.update_type)
+                LOG.exception("(%s) OrchThread unexpected exception" % self.update_type)
 
             # Wake up every 10 seconds to see if there is work to do.
             time.sleep(10)
@@ -285,30 +293,35 @@ class OrchThread(threading.Thread):
         for strategy_step in strategy_steps:
             if strategy_step.state == consts.STRATEGY_STATE_COMPLETE:
                 # This step is complete
-                self._delete_subcloud_worker(strategy_step.subcloud.region_name,
-                                             strategy_step.subcloud_id)
+                self._delete_subcloud_worker(
+                    strategy_step.subcloud.region_name, strategy_step.subcloud_id
+                )
                 continue
             elif strategy_step.state == consts.STRATEGY_STATE_ABORTED:
                 # This step was aborted
-                self._delete_subcloud_worker(strategy_step.subcloud.region_name,
-                                             strategy_step.subcloud_id)
+                self._delete_subcloud_worker(
+                    strategy_step.subcloud.region_name, strategy_step.subcloud_id
+                )
                 abort_detected = True
                 continue
             elif strategy_step.state == consts.STRATEGY_STATE_FAILED:
                 failure_detected = True
-                self._delete_subcloud_worker(strategy_step.subcloud.region_name,
-                                             strategy_step.subcloud_id)
+                self._delete_subcloud_worker(
+                    strategy_step.subcloud.region_name, strategy_step.subcloud_id
+                )
                 # This step has failed and needs no further action
                 if strategy_step.subcloud_id is None:
                     # Strategy on SystemController failed. We are done.
-                    LOG.info("(%s) Stopping strategy due to failure while "
-                             "processing update step on SystemController"
-                             % self.update_type)
+                    LOG.info(
+                        "(%s) Stopping strategy due to failure while "
+                        "processing update step on SystemController" % self.update_type
+                    )
                    with self.strategy_lock:
                        db_api.sw_update_strategy_update(
                            self.context,
                            state=consts.SW_UPDATE_STATE_FAILED,
-                            update_type=self.update_type)
+                            update_type=self.update_type,
+                        )
                     # Trigger audit to update the sync status for
                     # each subcloud.
                     self.trigger_audit()
@@ -323,29 +336,29 @@ class OrchThread(threading.Thread):
         else:
             # The strategy application is complete
             if failure_detected:
-                LOG.info("(%s) Strategy application has failed."
-                         % self.update_type)
+                LOG.info("(%s) Strategy application has failed." % self.update_type)
                 with self.strategy_lock:
                     db_api.sw_update_strategy_update(
                         self.context,
                         state=consts.SW_UPDATE_STATE_FAILED,
-                        update_type=self.update_type)
+                        update_type=self.update_type,
+                    )
             elif abort_detected:
-                LOG.info("(%s) Strategy application was aborted."
-                         % self.update_type)
+                LOG.info("(%s) Strategy application was aborted." % self.update_type)
                 with self.strategy_lock:
                     db_api.sw_update_strategy_update(
                         self.context,
                         state=consts.SW_UPDATE_STATE_ABORTED,
-                        update_type=self.update_type)
+                        update_type=self.update_type,
+                    )
             else:
-                LOG.info("(%s) Strategy application is complete."
-                         % self.update_type)
+                LOG.info("(%s) Strategy application is complete." % self.update_type)
                 with self.strategy_lock:
                     db_api.sw_update_strategy_update(
                         self.context,
                         state=consts.SW_UPDATE_STATE_COMPLETE,
-                        update_type=self.update_type)
+                        update_type=self.update_type,
+                    )
 
             self.subcloud_workers.clear()
             # Trigger audit to update the sync status for each subcloud.
@@ -360,13 +373,13 @@ class OrchThread(threading.Thread):
 
             if not work_remaining:
                 # We have completed the remaining steps
-                LOG.info("(%s) Stopping strategy due to failure"
-                         % self.update_type)
+                LOG.info("(%s) Stopping strategy due to failure" % self.update_type)
                 with self.strategy_lock:
                     db_api.sw_update_strategy_update(
                         self.context,
                         state=consts.SW_UPDATE_STATE_FAILED,
-                        update_type=self.update_type)
+                        update_type=self.update_type,
+                    )
                 # Trigger audit to update the sync status for each subcloud.
                 self.trigger_audit()
                 return
@@ -374,41 +387,40 @@ class OrchThread(threading.Thread):
        for strategy_step in strategy_steps:
            region = self.get_region_name(strategy_step)
            if self.stopped():
-                LOG.info("(%s) Exiting because task is stopped"
-                         % self.update_type)
+                LOG.info("(%s) Exiting because task is stopped" % self.update_type)
                 self.subcloud_workers.clear()
                 return
-            if strategy_step.state == \
-                    consts.STRATEGY_STATE_FAILED:
-                LOG.debug("(%s) Intermediate step is failed"
-                          % self.update_type)
-                self._delete_subcloud_worker(region,
-                                             strategy_step.subcloud_id)
+            if strategy_step.state == consts.STRATEGY_STATE_FAILED:
+                LOG.debug("(%s) Intermediate step is failed" % self.update_type)
+                self._delete_subcloud_worker(region, strategy_step.subcloud_id)
                 continue
-            elif strategy_step.state == \
-                    consts.STRATEGY_STATE_COMPLETE:
-                LOG.debug("(%s) Intermediate step is complete"
-                          % self.update_type)
-                self._delete_subcloud_worker(region,
-                                             strategy_step.subcloud_id)
+            elif strategy_step.state == consts.STRATEGY_STATE_COMPLETE:
+                LOG.debug("(%s) Intermediate step is complete" % self.update_type)
+                self._delete_subcloud_worker(region, strategy_step.subcloud_id)
                 continue
-            elif strategy_step.state == \
-                    consts.STRATEGY_STATE_INITIAL:
-                if sw_update_strategy.max_parallel_subclouds > \
-                        len(self.subcloud_workers) and not stop:
+            elif strategy_step.state == consts.STRATEGY_STATE_INITIAL:
+                if (
+                    sw_update_strategy.max_parallel_subclouds
+                    > len(self.subcloud_workers)
+                    and not stop
+                ):
                     # Don't start upgrading this subcloud if it has been
                     # unmanaged by the user. If orchestration was already
                     # started, it will be allowed to complete.
-                    if strategy_step.subcloud_id is not None and \
-                            strategy_step.subcloud.management_state == \
-                            dccommon_consts.MANAGEMENT_UNMANAGED:
-                        message = ("Subcloud %s is unmanaged." %
-                                   strategy_step.subcloud.name)
+                    if (
+                        strategy_step.subcloud_id is not None
+                        and strategy_step.subcloud.management_state
+                        == dccommon_consts.MANAGEMENT_UNMANAGED
+                    ):
+                        message = (
+                            "Subcloud %s is unmanaged." % strategy_step.subcloud.name
+                        )
                        LOG.warn(message)
                        self.strategy_step_update(
                            strategy_step.subcloud_id,
                            state=consts.STRATEGY_STATE_FAILED,
-                            details=message)
+                            details=message,
+                        )
                         continue
 
                     # We are just getting started, enter the first state
@@ -416,15 +428,12 @@ class OrchThread(threading.Thread):
                     strategy_step = self.strategy_step_update(
                         strategy_step.subcloud_id,
                         stage=consts.STAGE_SUBCLOUD_ORCHESTRATION_STARTED,
-                        state=self.starting_state)
+                        state=self.starting_state,
+                    )
                     # Starting state should log an error if greenthread exists
-                    self.process_update_step(region,
-                                             strategy_step,
-                                             log_error=True)
+                    self.process_update_step(region, strategy_step, log_error=True)
                 else:
-                    self.process_update_step(region,
-                                             strategy_step,
-                                             log_error=False)
+                    self.process_update_step(region, strategy_step, log_error=False)
 
     def abort(self, sw_update_strategy):
         """Abort an update strategy"""
@@ -437,19 +446,22 @@ class OrchThread(threading.Thread):
 
        for strategy_step in strategy_steps:
            if strategy_step.state == consts.STRATEGY_STATE_INITIAL:
-                LOG.info("(%s) Aborting step for subcloud %s"
-                         % (self.update_type,
-                            self.get_region_name(strategy_step)))
+                LOG.info(
+                    "(%s) Aborting step for subcloud %s"
+                    % (self.update_type, self.get_region_name(strategy_step))
+                )
                 self.strategy_step_update(
                     strategy_step.subcloud_id,
                     state=consts.STRATEGY_STATE_ABORTED,
-                    details="")
+                    details="",
+                )
 
        with self.strategy_lock:
            db_api.sw_update_strategy_update(
                self.context,
                state=consts.SW_UPDATE_STATE_ABORTING,
-                update_type=self.update_type)
+                update_type=self.update_type,
+            )
 
     def delete(self, sw_update_strategy):
         """Delete an update strategy"""
@@ -466,16 +478,14 @@ class OrchThread(threading.Thread):
                 LOG.debug("Worker already exists for %s." % region)
             else:
                 # Create a greenthread to delete the subcloud strategy
-                delete_thread = \
-                    self.thread_group_manager.start(
-                        self.delete_subcloud_strategy,
-                        strategy_step)
+                delete_thread = self.thread_group_manager.start(
+                    self.delete_subcloud_strategy, strategy_step
+                )
                 if delete_thread:
                     self.subcloud_workers[region] = delete_thread
 
             if self.stopped():
-                LOG.info("(%s) Exiting because task is stopped"
-                         % self.update_type)
+                LOG.info("(%s) Exiting because task is stopped" % self.update_type)
                 return
 
         # Wait for 180 seconds so that last 100 workers can
@@ -493,8 +503,7 @@ class OrchThread(threading.Thread):
             db_api.strategy_step_destroy_all(self.context)
             db_api.sw_update_strategy_destroy(self.context)
         except Exception as e:
-            LOG.exception("(%s) exception during delete"
-                          % self.update_type)
+            LOG.exception("(%s) exception during delete" % self.update_type)
             raise e
         LOG.info("(%s) Finished deleting update strategy" % self.update_type)
 
@@ -522,40 +531,56 @@ class OrchThread(threading.Thread):
 
         region = self.get_region_name(strategy_step)
 
-        LOG.info("(%s) Deleting vim strategy:(%s) for region:(%s)"
-                 % (self.update_type, self.vim_strategy_name, region))
+        LOG.info(
+            "(%s) Deleting vim strategy:(%s) for region:(%s)"
+            % (self.update_type, self.vim_strategy_name, region)
+        )
 
         # First check if the strategy has been created.
         try:
             subcloud_strategy = OrchThread.get_vim_client(region).get_strategy(
-                strategy_name=self.vim_strategy_name)
+                strategy_name=self.vim_strategy_name
+            )
         except (keystone_exceptions.EndpointNotFound, IndexError):
-            message = ("(%s) Endpoint for subcloud: %s not found." %
-                       (self.update_type, region))
+            message = "(%s) Endpoint for subcloud: %s not found." % (
+                self.update_type,
+                region,
+            )
            LOG.warn(message)
            return
        except Exception:
            # Strategy doesn't exist so there is nothing to do
            return
 
-        if subcloud_strategy.state in [vim.STATE_BUILDING,
-                                       vim.STATE_APPLYING,
-                                       vim.STATE_ABORTING]:
+        if subcloud_strategy.state in [
+            vim.STATE_BUILDING,
+            vim.STATE_APPLYING,
+            vim.STATE_ABORTING,
+        ]:
             # Can't delete a vim strategy in these states
-            message = ("(%s) Vim strategy:(%s) for region:(%s)"
-                       " in wrong state:(%s) for delete."
-                       % (self.update_type, self.vim_strategy_name, region,
-                          subcloud_strategy.state))
+            message = (
+                "(%s) Vim strategy:(%s) for region:(%s) in wrong state:(%s) for delete."
+                % (
+                    self.update_type,
+                    self.vim_strategy_name,
+                    region,
+                    subcloud_strategy.state,
+                )
+            )
            LOG.warn(message)
            return
 
        # If we are here, we need to delete the strategy
        try:
            OrchThread.get_vim_client(region).delete_strategy(
-                strategy_name=self.vim_strategy_name)
+                strategy_name=self.vim_strategy_name
+            )
        except Exception:
-            message = ("(%s) Vim strategy:(%s) delete failed for region:(%s)"
-                       % (self.update_type, self.vim_strategy_name, region))
+            message = "(%s) Vim strategy:(%s) delete failed for region:(%s)" % (
+                self.update_type,
+                self.vim_strategy_name,
+                region,
+            )
            LOG.warn(message)
            return
 
@@ -565,63 +590,92 @@ class OrchThread(threading.Thread):
             if self.subcloud_workers[region][0] == strategy_step.state:
                 # A worker already exists. Let it finish whatever it was doing.
                 if log_error:
-                    LOG.error("(%s) Worker should not exist for %s."
-                              % (self.update_type, region))
+                    LOG.error(
+                        "(%s) Worker should not exist for %s."
+                        % (self.update_type, region)
+                    )
                 else:
-                    LOG.debug("(%s) Update worker exists for %s."
-                              % (self.update_type, region))
+                    LOG.debug(
+                        "(%s) Update worker exists for %s." % (self.update_type, region)
+                    )
             else:
-                LOG.debug("Starting a new worker for region %s at state %s (update)"
-                          % (region, strategy_step.state))
+                LOG.debug(
+                    "Starting a new worker for region %s at state %s (update)"
+                    % (region, strategy_step.state)
+                )
                 # Advance to the next state. The previous greenthread has exited,
                 # create a new one.
-                self.subcloud_workers[region] = \
-                    (strategy_step.state, self.thread_group_manager.start(
-                        self.perform_state_action, strategy_step))
+                self.subcloud_workers[region] = (
+                    strategy_step.state,
+                    self.thread_group_manager.start(
+                        self.perform_state_action, strategy_step
+                    ),
+                )
         else:
             # This is the first state. create a greenthread to start processing
             # the update for the subcloud and invoke the perform_state_action method.
-            LOG.debug("Starting a new worker for region %s at state %s"
-                      % (region, strategy_step.state))
-            self.subcloud_workers[region] = \
-                (strategy_step.state, self.thread_group_manager.start(
-                    self.perform_state_action, strategy_step))
+            LOG.debug(
+                "Starting a new worker for region %s at state %s"
+                % (region, strategy_step.state)
+            )
+            self.subcloud_workers[region] = (
+                strategy_step.state,
+                self.thread_group_manager.start(
+                    self.perform_state_action, strategy_step
+                ),
+            )
 
     def perform_state_action(self, strategy_step):
-        """Extensible state handler for processing and transitioning states """
+        """Extensible state handler for processing and transitioning states"""
         try:
-            LOG.info("(%s) Stage: %s, State: %s, Subcloud: %s"
-                     % (self.update_type,
-                        strategy_step.stage,
-                        strategy_step.state,
-                        self.get_subcloud_name(strategy_step)))
+            LOG.info(
+                "(%s) Stage: %s, State: %s, Subcloud: %s"
+                % (
+                    self.update_type,
+                    strategy_step.stage,
+                    strategy_step.state,
+                    self.get_subcloud_name(strategy_step),
+                )
+            )
             # Instantiate the state operator and perform the state actions
             state_operator = self.determine_state_operator(strategy_step)
             state_operator.registerStopEvent(self._stop)
             next_state = state_operator.perform_state_action(strategy_step)
-            self.strategy_step_update(strategy_step.subcloud_id,
-                                      state=next_state,
-                                      details="")
+            self.strategy_step_update(
+                strategy_step.subcloud_id, state=next_state, details=""
+            )
         except exceptions.StrategySkippedException as ex:
-            LOG.info("(%s) Skipping subcloud, Stage: %s, State: %s, Subcloud: %s"
-                     % (self.update_type,
-                        strategy_step.stage,
-                        strategy_step.state,
-                        strategy_step.subcloud.name))
+            LOG.info(
+                "(%s) Skipping subcloud, Stage: %s, State: %s, Subcloud: %s"
+                % (
+                    self.update_type,
+                    strategy_step.stage,
+                    strategy_step.state,
+                    strategy_step.subcloud.name,
+                )
+            )
             # Transition immediately to complete. Update the details to show
             # that this subcloud has been skipped
             details = self.format_update_details(None, str(ex))
-            self.strategy_step_update(strategy_step.subcloud_id,
-                                      state=consts.STRATEGY_STATE_COMPLETE,
-                                      details=details)
+            self.strategy_step_update(
+                strategy_step.subcloud_id,
+                state=consts.STRATEGY_STATE_COMPLETE,
+                details=details,
+            )
         except Exception as ex:
             # Catch ALL exceptions and set the strategy to failed
-            LOG.exception("(%s) Failed! Stage: %s, State: %s, Subcloud: %s"
-                          % (self.update_type,
-                             strategy_step.stage,
-                             strategy_step.state,
-                             strategy_step.subcloud.name))
+            LOG.exception(
+                "(%s) Failed! Stage: %s, State: %s, Subcloud: %s"
+                % (
+                    self.update_type,
+                    strategy_step.stage,
+                    strategy_step.state,
+                    strategy_step.subcloud.name,
+                )
+            )
             details = self.format_update_details(strategy_step.state, str(ex))
-            self.strategy_step_update(strategy_step.subcloud_id,
-                                      state=consts.STRATEGY_STATE_FAILED,
-                                      details=details)
+            self.strategy_step_update(
+                strategy_step.subcloud_id,
+                state=consts.STRATEGY_STATE_FAILED,
+                details=details,
+            )
@@ -20,10 +20,12 @@ from oslo_log import log as logging
 from dccommon.drivers.openstack import vim
 from dcmanager.common import consts
 from dcmanager.orchestrator.orch_thread import OrchThread
-from dcmanager.orchestrator.states.patch.applying_vim_patch_strategy import \
-    ApplyingVIMPatchStrategyState
-from dcmanager.orchestrator.states.patch.creating_vim_patch_strategy import \
-    CreatingVIMPatchStrategyState
+from dcmanager.orchestrator.states.patch.applying_vim_patch_strategy import (
+    ApplyingVIMPatchStrategyState,
+)
+from dcmanager.orchestrator.states.patch.creating_vim_patch_strategy import (
+    CreatingVIMPatchStrategyState,
+)
 from dcmanager.orchestrator.states.patch.pre_check import PreCheckState
 from dcmanager.orchestrator.states.patch.updating_patches import UpdatingPatchesState
 
@@ -31,15 +33,17 @@ LOG = logging.getLogger(__name__)
 
 
 class PatchOrchThread(OrchThread):
+    # Reassign constants to avoid line length issues
+    PRE_CHECK = consts.STRATEGY_STATE_PRE_CHECK
+    UPDATING_PATCHES = consts.STRATEGY_STATE_UPDATING_PATCHES
+    CREATE_VIM_STRATEGY = consts.STRATEGY_STATE_CREATING_VIM_PATCH_STRATEGY
+    APPLY_VIM_STRATEGY = consts.STRATEGY_STATE_APPLYING_VIM_PATCH_STRATEGY
+
     STATE_OPERATORS = {
-        consts.STRATEGY_STATE_PRE_CHECK:
-            PreCheckState,
-        consts.STRATEGY_STATE_UPDATING_PATCHES:
-            UpdatingPatchesState,
-        consts.STRATEGY_STATE_CREATING_VIM_PATCH_STRATEGY:
-            CreatingVIMPatchStrategyState,
-        consts.STRATEGY_STATE_APPLYING_VIM_PATCH_STRATEGY:
-            ApplyingVIMPatchStrategyState,
+        PRE_CHECK: PreCheckState,
+        UPDATING_PATCHES: UpdatingPatchesState,
+        CREATE_VIM_STRATEGY: CreatingVIMPatchStrategyState,
+        APPLY_VIM_STRATEGY: ApplyingVIMPatchStrategyState,
     }
 
     def __init__(self, strategy_lock, audit_rpc_client):
@@ -48,7 +52,8 @@ class PatchOrchThread(OrchThread):
             audit_rpc_client,
             consts.SW_UPDATE_TYPE_PATCH,
             vim.STRATEGY_NAME_SW_PATCH,
-            starting_state=consts.STRATEGY_STATE_PRE_CHECK)
+            starting_state=consts.STRATEGY_STATE_PRE_CHECK,
+        )
 
     def determine_state_operator(self, strategy_step):
         state = super(PatchOrchThread, self).determine_state_operator(strategy_step)
@@ -1,4 +1,4 @@
-# Copyright (c) 2022-2023 Wind River Systems, Inc.
+# Copyright (c) 2022-2024 Wind River Systems, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -28,12 +28,9 @@ class PrestageOrchThread(OrchThread):
     # Every state in prestage orchestration must have an operator
     # The states are listed here in their typical execution order
     STATE_OPERATORS = {
-        consts.STRATEGY_STATE_PRESTAGE_PRE_CHECK:
-            states.PrestagePreCheckState,
-        consts.STRATEGY_STATE_PRESTAGE_PACKAGES:
-            states.PrestagePackagesState,
-        consts.STRATEGY_STATE_PRESTAGE_IMAGES:
-            states.PrestageImagesState,
+        consts.STRATEGY_STATE_PRESTAGE_PRE_CHECK: states.PrestagePreCheckState,
+        consts.STRATEGY_STATE_PRESTAGE_PACKAGES: states.PrestagePackagesState,
+        consts.STRATEGY_STATE_PRESTAGE_IMAGES: states.PrestageImagesState,
     }
 
     def __init__(self, strategy_lock, audit_rpc_client):
@@ -42,7 +39,8 @@ class PrestageOrchThread(OrchThread):
             audit_rpc_client,
             consts.SW_UPDATE_TYPE_PRESTAGE,
             None,
-            consts.STRATEGY_STATE_PRESTAGE_PRE_CHECK)
+            consts.STRATEGY_STATE_PRESTAGE_PRE_CHECK,
+        )
 
     def trigger_audit(self):
         """Trigger an audit"""
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2021 Wind River Systems, Inc.
+# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
@@ -27,13 +27,14 @@ class ManagerOrchestratorClient(object):
     1.0 - Initial version
     """
 
-    BASE_RPC_API_VERSION = '1.0'
+    BASE_RPC_API_VERSION = "1.0"
 
     def __init__(self, timeout=None):
         self._client = messaging.get_rpc_client(
             timeout=timeout,
             topic=consts.TOPIC_DC_MANAGER_ORCHESTRATOR,
-            version=self.BASE_RPC_API_VERSION)
+            version=self.BASE_RPC_API_VERSION,
+        )
 
     @staticmethod
     def make_msg(method, **kwargs):
@@ -56,17 +57,21 @@ class ManagerOrchestratorClient(object):
         return client.cast(ctxt, method, **kwargs)
 
     def create_sw_update_strategy(self, ctxt, payload):
-        return self.call(ctxt, self.make_msg('create_sw_update_strategy',
-                                             payload=payload))
+        return self.call(
+            ctxt, self.make_msg("create_sw_update_strategy", payload=payload)
+        )
 
     def delete_sw_update_strategy(self, ctxt, update_type=None):
-        return self.call(ctxt, self.make_msg('delete_sw_update_strategy',
-                                             update_type=update_type))
+        return self.call(
+            ctxt, self.make_msg("delete_sw_update_strategy", update_type=update_type)
+        )
 
     def apply_sw_update_strategy(self, ctxt, update_type=None):
-        return self.call(ctxt, self.make_msg('apply_sw_update_strategy',
-                                             update_type=update_type))
+        return self.call(
+            ctxt, self.make_msg("apply_sw_update_strategy", update_type=update_type)
+        )
 
     def abort_sw_update_strategy(self, ctxt, update_type=None):
-        return self.call(ctxt, self.make_msg('abort_sw_update_strategy',
-                                             update_type=update_type))
+        return self.call(
+            ctxt, self.make_msg("abort_sw_update_strategy", update_type=update_type)
+        )
@@ -67,9 +67,9 @@ class DCManagerOrchestratorService(service.Service):
         utils.set_open_file_limit(cfg.CONF.worker_rlimit_nofile)
         self.init_tgm()
         self.init_manager()
-        target = oslo_messaging.Target(version=self.rpc_api_version,
-                                       server=self.host,
-                                       topic=self.topic)
+        target = oslo_messaging.Target(
+            version=self.rpc_api_version, server=self.host, topic=self.topic
+        )
         self.target = target
         self._rpc_server = rpc_messaging.get_rpc_server(self.target, self)
         self._rpc_server.start()
@@ -89,9 +89,9 @@ class DCManagerOrchestratorService(service.Service):
             self._rpc_server.stop()
             self._rpc_server.wait()
             self._rpc_server = None
-            LOG.info('RPC service stopped successfully')
+            LOG.info("RPC service stopped successfully")
         except Exception as ex:
-            LOG.error('Failed to stop engine service: %s', str(ex))
+            LOG.error("Failed to stop engine service: %s", str(ex))
 
     def stop(self):
         """Stop anything initiated by start"""
@@ -110,31 +110,32 @@ class DCManagerOrchestratorService(service.Service):
     @request_context
     def create_sw_update_strategy(self, context, payload):
         # Creates a software update strategy
-        LOG.info("Handling create_sw_update_strategy request of type %s" %
-                 payload.get('type'))
-        return self.sw_update_manager.create_sw_update_strategy(
-            context, payload)
+        LOG.info(
+            "Handling create_sw_update_strategy request of type %s"
+            % payload.get("type")
+        )
+        return self.sw_update_manager.create_sw_update_strategy(context, payload)
 
     @request_context
     def delete_sw_update_strategy(self, context, update_type=None):
         # Deletes the software update strategy
         LOG.info("Handling delete_sw_update_strategy request")
         return self.sw_update_manager.delete_sw_update_strategy(
-            context,
-            update_type=update_type)
+            context, update_type=update_type
+        )
 
     @request_context
     def apply_sw_update_strategy(self, context, update_type=None):
         # Applies the software update strategy
         LOG.info("Handling apply_sw_update_strategy request")
         return self.sw_update_manager.apply_sw_update_strategy(
-            context,
-            update_type=update_type)
+            context, update_type=update_type
+        )
 
     @request_context
     def abort_sw_update_strategy(self, context, update_type=None):
         # Aborts the software update strategy
         LOG.info("Handling abort_sw_update_strategy request")
         return self.sw_update_manager.abort_sw_update_strategy(
-            context,
-            update_type=update_type)
+            context, update_type=update_type
+        )
@@ -7,16 +7,17 @@
 from dccommon.drivers.openstack import vim
 from dcmanager.common import consts
 from dcmanager.orchestrator.orch_thread import OrchThread
-from dcmanager.orchestrator.states.software.apply_vim_software_strategy import \
-    ApplyVIMSoftwareStrategyState
-from dcmanager.orchestrator.states.software.cache.shared_cache_repository import \
-    SharedCacheRepository
-from dcmanager.orchestrator.states.software.create_vim_software_strategy import \
-    CreateVIMSoftwareStrategyState
-from dcmanager.orchestrator.states.software.finish_strategy import \
-    FinishStrategyState
-from dcmanager.orchestrator.states.software.install_license import \
-    InstallLicenseState
+from dcmanager.orchestrator.states.software.apply_vim_software_strategy import (
+    ApplyVIMSoftwareStrategyState,
+)
+from dcmanager.orchestrator.states.software.cache.shared_cache_repository import (
+    SharedCacheRepository,
+)
+from dcmanager.orchestrator.states.software.create_vim_software_strategy import (
+    CreateVIMSoftwareStrategyState,
+)
+from dcmanager.orchestrator.states.software.finish_strategy import FinishStrategyState
+from dcmanager.orchestrator.states.software.install_license import InstallLicenseState
 from dcmanager.orchestrator.states.software.pre_check import PreCheckState
 
 
@@ -50,9 +51,10 @@ class SoftwareOrchThread(OrchThread):
         super().__init__(
             strategy_lock,
             audit_rpc_client,
-            consts.SW_UPDATE_TYPE_SOFTWARE,  # software update strategy type
-            vim.STRATEGY_NAME_SW_USM,  # strategy type used by vim
-            consts.STRATEGY_STATE_SW_PRE_CHECK)  # starting state
+            consts.SW_UPDATE_TYPE_SOFTWARE,  # software update strategy type
+            vim.STRATEGY_NAME_SW_USM,  # strategy type used by vim
+            consts.STRATEGY_STATE_SW_PRE_CHECK,  # starting state
+        )
 
         # Initialize shared cache instances for the states that require them
         self._shared_caches = SharedCacheRepository(consts.SW_UPDATE_TYPE_SOFTWARE)
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2020-2023 Wind River Systems, Inc.
+# Copyright (c) 2020-2024 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -27,11 +27,17 @@ WAIT_INTERVAL = 60
 class ApplyingVIMStrategyState(BaseState):
     """State for applying the VIM strategy."""
 
-    def __init__(self, next_state, region_name, strategy_name,
-                 wait_attempts=DEFAULT_MAX_WAIT_ATTEMPTS,
-                 wait_interval=WAIT_INTERVAL):
+    def __init__(
+        self,
+        next_state,
+        region_name,
+        strategy_name,
+        wait_attempts=DEFAULT_MAX_WAIT_ATTEMPTS,
+        wait_interval=WAIT_INTERVAL,
+    ):
         super(ApplyingVIMStrategyState, self).__init__(
-            next_state=next_state, region_name=region_name)
+            next_state=next_state, region_name=region_name
+        )
         self.strategy_name = strategy_name
         self.max_failed_queries = DEFAULT_MAX_FAILED_QUERIES
         self.wait_attempts = wait_attempts
@@ -52,42 +58,50 @@ class ApplyingVIMStrategyState(BaseState):
 
         # query the vim strategy. Check if it is None
         subcloud_strategy = self.get_vim_client(region).get_strategy(
-            strategy_name=self.strategy_name,
-            raise_error_if_missing=False)
+            strategy_name=self.strategy_name, raise_error_if_missing=False
+        )
 
         # Do not raise the default exception if there is no strategy
         # because the default exception is unclear: ie: "Get strategy failed"
         if subcloud_strategy is None:
-            raise Exception("(%s) VIM Strategy not found."
-                            % self.strategy_name)
+            raise Exception("(%s) VIM Strategy not found." % self.strategy_name)
 
         # We have a VIM strategy, but need to check if it is ready to apply
         elif subcloud_strategy.state == vim.STATE_READY_TO_APPLY:
             # An exception here will fail this state
             subcloud_strategy = self.get_vim_client(region).apply_strategy(
-                strategy_name=self.strategy_name)
+                strategy_name=self.strategy_name
+            )
             if subcloud_strategy.state == vim.STATE_APPLYING:
-                self.info_log(strategy_step,
-                              "(%s) VIM Strategy apply in progress"
-                              % self.strategy_name)
+                self.info_log(
+                    strategy_step,
+                    "(%s) VIM Strategy apply in progress" % self.strategy_name,
+                )
             elif subcloud_strategy.state == vim.STATE_APPLIED:
                 # Success.
-                self.info_log(strategy_step,
-                              "(%s) VIM strategy has been applied"
-                              % self.strategy_name)
-            elif subcloud_strategy.state in [vim.STATE_APPLY_FAILED,
-                                             vim.STATE_APPLY_TIMEOUT]:
+                self.info_log(
+                    strategy_step,
+                    "(%s) VIM strategy has been applied" % self.strategy_name,
+                )
+            elif subcloud_strategy.state in [
+                vim.STATE_APPLY_FAILED,
+                vim.STATE_APPLY_TIMEOUT,
+            ]:
                 # Explicit known failure states
-                raise Exception("(%s) VIM strategy apply failed. %s. %s"
-                                % (self.strategy_name,
-                                   subcloud_strategy.state,
-                                   subcloud_strategy.apply_phase.reason))
+                raise Exception(
+                    "(%s) VIM strategy apply failed. %s. %s"
+                    % (
+                        self.strategy_name,
+                        subcloud_strategy.state,
+                        subcloud_strategy.apply_phase.reason,
+                    )
+                )
             else:
                 # Other states are bad
-                raise Exception("(%s) VIM strategy apply failed. "
-                                "Unexpected State: %s."
-                                % (self.strategy_name,
-                                   subcloud_strategy.state))
+                raise Exception(
+                    "(%s) VIM strategy apply failed. Unexpected State: %s."
+                    % (self.strategy_name, subcloud_strategy.state)
+                )
 
         # wait for new strategy to apply or the existing strategy to complete.
         # Loop until the strategy applies. Repeatedly query the API
@@ -109,16 +123,17 @@ class ApplyingVIMStrategyState(BaseState):
             # break out of the loop if the max number of attempts is reached
             wait_count += 1
             if wait_count >= self.wait_attempts:
-                raise Exception("Timeout applying (%s) vim strategy."
-                                % self.strategy_name)
+                raise Exception(
+                    "Timeout applying (%s) vim strategy." % self.strategy_name
+                )
             # every loop we wait, even the first one
             time.sleep(self.wait_interval)
 
             # get the strategy
             try:
                 subcloud_strategy = self.get_vim_client(region).get_strategy(
-                    strategy_name=self.strategy_name,
-                    raise_error_if_missing=False)
+                    strategy_name=self.strategy_name, raise_error_if_missing=False
+                )
                 get_fail_count = 0
             except Exception:
                 # When applying the strategy to a subcloud, the VIM can
@@ -128,52 +143,64 @@ class ApplyingVIMStrategyState(BaseState):
                 get_fail_count += 1
                 if get_fail_count >= self.max_failed_queries:
                     # We have waited too long.
-                    raise Exception("Timeout during recovery of apply "
-                                    "(%s) Vim strategy."
-                                    % self.strategy_name)
-                self.debug_log(strategy_step,
-                               "Unable to get (%s) vim strategy - attempt %d"
-                               % (self.strategy_name, get_fail_count))
+                    raise Exception(
+                        "Timeout during recovery of apply (%s) Vim strategy."
+                        % self.strategy_name
+                    )
+                self.debug_log(
+                    strategy_step,
+                    "Unable to get (%s) vim strategy - attempt %d"
+                    % (self.strategy_name, get_fail_count),
+                )
                 continue
             # If an external actor has deleted the strategy, the only option
             # is to fail this state.
             if subcloud_strategy is None:
-                raise Exception("(%s) VIM Strategy no longer exists."
-                                % self.strategy_name)
+                raise Exception(
+                    "(%s) VIM Strategy no longer exists." % self.strategy_name
+                )
 
             elif subcloud_strategy.state == vim.STATE_APPLYING:
                 # Still applying. Update details if it has changed
-                new_details = ("%s phase is %s%% complete" % (
-                    subcloud_strategy.current_phase,
-                    subcloud_strategy.current_phase_completion_percentage))
+                new_details = "%s phase is %s%% complete" % (
+                    subcloud_strategy.current_phase,
+                    subcloud_strategy.current_phase_completion_percentage,
+                )
                 if new_details != last_details:
                     # Progress is being made.
                     # Reset the counter and log the progress
                     last_details = new_details
                     wait_count = 0
                     self.info_log(strategy_step, new_details)
-                    db_api.strategy_step_update(self.context,
-                                                strategy_step.subcloud_id,
-                                                details=new_details)
+                    db_api.strategy_step_update(
+                        self.context, strategy_step.subcloud_id, details=new_details
+                    )
             elif subcloud_strategy.state == vim.STATE_APPLIED:
                 # Success.
-                self.info_log(strategy_step,
-                              "(%s) Vim strategy has been applied"
-                              % self.strategy_name)
+                self.info_log(
+                    strategy_step,
+                    "(%s) Vim strategy has been applied" % self.strategy_name,
+                )
                 break
-            elif subcloud_strategy.state in [vim.STATE_APPLY_FAILED,
-                                             vim.STATE_APPLY_TIMEOUT]:
+            elif subcloud_strategy.state in [
+                vim.STATE_APPLY_FAILED,
+                vim.STATE_APPLY_TIMEOUT,
+            ]:
                 # Explicit known failure states
-                raise Exception("(%s) Vim strategy apply failed. %s. %s"
-                                % (self.strategy_name,
-                                   subcloud_strategy.state,
-                                   subcloud_strategy.apply_phase.reason))
+                raise Exception(
+                    "(%s) Vim strategy apply failed. %s. %s"
+                    % (
+                        self.strategy_name,
+                        subcloud_strategy.state,
+                        subcloud_strategy.apply_phase.reason,
+                    )
+                )
             else:
                 # Other states are bad
-                raise Exception("(%s) Vim strategy apply failed. "
-                                "Unexpected State: %s."
-                                % (self.strategy_name,
-                                   subcloud_strategy.state))
+                raise Exception(
+                    "(%s) Vim strategy apply failed. Unexpected State: %s."
+                    % (self.strategy_name, subcloud_strategy.state)
+                )
         # end of loop
 
         # Success, state machine can proceed to the next state
@@ -48,39 +48,59 @@ class BaseState(object, metaclass=abc.ABCMeta):
         return False
 
     def debug_log(self, strategy_step, details):
-        LOG.debug("Stage: %s, State: %s, Subcloud: %s, Details: %s"
-                  % (strategy_step.stage,
-                     strategy_step.state,
-                     self.get_subcloud_name(strategy_step),
-                     details))
+        LOG.debug(
+            "Stage: %s, State: %s, Subcloud: %s, Details: %s"
+            % (
+                strategy_step.stage,
+                strategy_step.state,
+                self.get_subcloud_name(strategy_step),
+                details,
+            )
+        )
 
     def info_log(self, strategy_step, details):
-        LOG.info("Stage: %s, State: %s, Subcloud: %s, Details: %s"
-                 % (strategy_step.stage,
-                    strategy_step.state,
-                    self.get_subcloud_name(strategy_step),
-                    details))
+        LOG.info(
+            "Stage: %s, State: %s, Subcloud: %s, Details: %s"
+            % (
+                strategy_step.stage,
+                strategy_step.state,
+                self.get_subcloud_name(strategy_step),
+                details,
+            )
+        )
 
     def warn_log(self, strategy_step, details):
-        LOG.warn("Stage: %s, State: %s, Subcloud: %s, Details: %s"
-                 % (strategy_step.stage,
-                    strategy_step.state,
-                    self.get_subcloud_name(strategy_step),
-                    details))
+        LOG.warn(
+            "Stage: %s, State: %s, Subcloud: %s, Details: %s"
+            % (
+                strategy_step.stage,
+                strategy_step.state,
+                self.get_subcloud_name(strategy_step),
+                details,
+            )
+        )
 
     def error_log(self, strategy_step, details):
-        LOG.error("Stage: %s, State: %s, Subcloud: %s, Details: %s"
-                  % (strategy_step.stage,
-                     strategy_step.state,
-                     self.get_subcloud_name(strategy_step),
-                     details))
+        LOG.error(
+            "Stage: %s, State: %s, Subcloud: %s, Details: %s"
+            % (
+                strategy_step.stage,
+                strategy_step.state,
+                self.get_subcloud_name(strategy_step),
+                details,
+            )
+        )
 
     def exception_log(self, strategy_step, details):
-        LOG.exception("Stage: %s, State: %s, Subcloud: %s, Details: %s"
-                      % (strategy_step.stage,
-                         strategy_step.state,
-                         self.get_subcloud_name(strategy_step),
-                         details))
+        LOG.exception(
+            "Stage: %s, State: %s, Subcloud: %s, Details: %s"
+            % (
+                strategy_step.stage,
+                strategy_step.state,
+                self.get_subcloud_name(strategy_step),
+                details,
+            )
+        )
 
     @staticmethod
     def get_region_name(strategy_step):
@@ -110,18 +130,16 @@ class BaseState(object, metaclass=abc.ABCMeta):
             )
             return os_client.keystone_client
         except Exception:
-            LOG.warning('Failure initializing KeystoneClient for region: %s'
-                        % region_name)
+            LOG.warning(
+                "Failure initializing KeystoneClient for region: %s" % region_name
+            )
             raise
 
     def get_sysinv_client(self, region_name):
-        """construct a sysinv client
-
-        """
+        """construct a sysinv client"""
         keystone_client = self.get_keystone_client(region_name)
-        endpoint = keystone_client.endpoint_cache.get_endpoint('sysinv')
-        return SysinvClient(region_name, keystone_client.session,
-                            endpoint=endpoint)
+        endpoint = keystone_client.endpoint_cache.get_endpoint("sysinv")
+        return SysinvClient(region_name, keystone_client.session, endpoint=endpoint)
 
     def get_fm_client(self, region_name):
         keystone_client = self.get_keystone_client(region_name)
@@ -145,9 +163,7 @@ class BaseState(object, metaclass=abc.ABCMeta):
         return self.get_sysinv_client(self.region_name)
 
     def get_barbican_client(self, region_name):
-        """construct a barbican client
-
-        """
+        """construct a barbican client"""
         keystone_client = self.get_keystone_client(region_name)
 
         return BarbicanClient(region_name, keystone_client.session)
@@ -155,8 +171,7 @@ class BaseState(object, metaclass=abc.ABCMeta):
     def get_vim_client(self, region_name):
         """construct a vim client for a region."""
         keystone_client = self.get_keystone_client(region_name)
-        return VimClient(region_name,
-                         keystone_client.session)
+        return VimClient(region_name, keystone_client.session)
 
     def add_shared_caches(self, shared_caches):
         # Shared caches not required by all states, so instantiate only if necessary
@@ -166,8 +181,9 @@ class BaseState(object, metaclass=abc.ABCMeta):
         if self._shared_caches is not None:
             return self._shared_caches.read(cache_type, **filter_params)
         else:
-            InvalidParameterValue(err="Specified cache type '%s' not "
-                                      "present" % cache_type)
+            InvalidParameterValue(
+                err="Specified cache type '%s' not present" % cache_type
+            )
 
     @abc.abstractmethod
     def perform_state_action(self, strategy_step):
@ -21,9 +21,7 @@ class CreatingVIMStrategyState(BaseState):
    """State for creating the VIM strategy."""

    def __init__(self, next_state, region_name, strategy_name):
        super().__init__(
            next_state=next_state, region_name=region_name
        )
        super().__init__(next_state=next_state, region_name=region_name)
        self.strategy_name = strategy_name
        # max time to wait for the strategy to be built (in seconds)
        # is: sleep_duration * max_queries
@ -31,8 +29,7 @@ class CreatingVIMStrategyState(BaseState):
        self.max_queries = DEFAULT_MAX_QUERIES

    def _create_vim_strategy(self, strategy_step, region):
        self.info_log(strategy_step,
                      "Creating (%s) VIM strategy" % self.strategy_name)
        self.info_log(strategy_step, "Creating (%s) VIM strategy" % self.strategy_name)

        # Get the update options
        opts_dict = utils.get_sw_update_opts(
@ -51,19 +48,20 @@ class CreatingVIMStrategyState(BaseState):
        # release and rollback will be sent as a **kwargs value for sw-deploy strategy
        subcloud_strategy = self.get_vim_client(region).create_strategy(
            self.strategy_name,
            opts_dict['storage-apply-type'],
            opts_dict['worker-apply-type'],
            opts_dict['max-parallel-workers'],
            opts_dict['default-instance-action'],
            opts_dict['alarm-restriction-type'],
            release=opts_dict.get('release_id'),
            rollback=opts_dict.get('rollback'),
            opts_dict["storage-apply-type"],
            opts_dict["worker-apply-type"],
            opts_dict["max-parallel-workers"],
            opts_dict["default-instance-action"],
            opts_dict["alarm-restriction-type"],
            release=opts_dict.get("release_id"),
            rollback=opts_dict.get("rollback"),
        )

        # a successful API call to create MUST set the state be 'building'
        if subcloud_strategy.state != vim.STATE_BUILDING:
            raise Exception("Unexpected VIM strategy build state: %s"
                            % subcloud_strategy.state)
            raise Exception(
                "Unexpected VIM strategy build state: %s" % subcloud_strategy.state
            )
        return subcloud_strategy

    def skip_check(self, strategy_step, subcloud_strategy):
@ -83,37 +81,40 @@ class CreatingVIMStrategyState(BaseState):

        # Get the existing VIM strategy, which may be None
        subcloud_strategy = self.get_vim_client(region).get_strategy(
            strategy_name=self.strategy_name,
            raise_error_if_missing=False)
            strategy_name=self.strategy_name, raise_error_if_missing=False
        )

        if subcloud_strategy is None:
            subcloud_strategy = self._create_vim_strategy(strategy_step,
                                                          region)
            subcloud_strategy = self._create_vim_strategy(strategy_step, region)
        else:
            self.info_log(strategy_step,
                          "VIM strategy exists with state: %s"
                          % subcloud_strategy.state)
            self.info_log(
                strategy_step,
                "VIM strategy exists with state: %s" % subcloud_strategy.state,
            )
            # if a strategy exists in any type of failed state or aborted
            # state it should be deleted.
            # applied state should also be deleted from previous success runs.
            if subcloud_strategy.state in [vim.STATE_BUILDING,
                                           vim.STATE_APPLYING,
                                           vim.STATE_ABORTING]:
            if subcloud_strategy.state in [
                vim.STATE_BUILDING,
                vim.STATE_APPLYING,
                vim.STATE_ABORTING,
            ]:
                # Can't delete a strategy in these states
                message = ("Failed to create a VIM strategy for %s. "
                           "There already is an existing strategy in %s state"
                           % (region, subcloud_strategy.state))
                message = (
                    "Failed to create a VIM strategy for %s. "
                    "There already is an existing strategy in %s state"
                    % (region, subcloud_strategy.state)
                )
                self.warn_log(strategy_step, message)
                raise Exception(message)

            # if strategy exists in any other type of state, delete and create
            self.info_log(strategy_step,
                          "Deleting existing VIM strategy")
            self.info_log(strategy_step, "Deleting existing VIM strategy")
            self.get_vim_client(region).delete_strategy(
                strategy_name=self.strategy_name)
                strategy_name=self.strategy_name
            )
            # re-create it
            subcloud_strategy = self._create_vim_strategy(strategy_step,
                                                          region)
            subcloud_strategy = self._create_vim_strategy(strategy_step, region)

        # A strategy already exists, or is being built
        # Loop until the strategy is done building Repeatedly query the API
@ -123,22 +124,22 @@ class CreatingVIMStrategyState(BaseState):
            if self.stopped():
                raise StrategyStoppedException()
            if counter >= self.max_queries:
                raise Exception("Timeout building vim strategy. state: %s"
                                % subcloud_strategy.state)
                raise Exception(
                    "Timeout building vim strategy. state: %s" % subcloud_strategy.state
                )
            counter += 1
            time.sleep(self.sleep_duration)

            # query the vim strategy to see if it is in the new state
            subcloud_strategy = self.get_vim_client(region).get_strategy(
                strategy_name=self.strategy_name,
                raise_error_if_missing=True)
                strategy_name=self.strategy_name, raise_error_if_missing=True
            )

            # Check for skip criteria where a failed 'build' might be expected
            # pylint: disable-next=assignment-from-none
            skip_state = self.skip_check(strategy_step, subcloud_strategy)
            if skip_state is not None:
                self.info_log(strategy_step,
                              "Skip forward to state:(%s)" % skip_state)
                self.info_log(strategy_step, "Skip forward to state:(%s)" % skip_state)
                self.override_next_state(skip_state)
                # break out of loop. Let overridden 'next_state' take over
                break
@ -150,15 +151,18 @@ class CreatingVIMStrategyState(BaseState):
                # This is the expected state while creating the strategy
                pass
            elif subcloud_strategy.state == vim.STATE_BUILD_FAILED:
                raise Exception("VIM strategy build failed: %s. %s."
                                % (subcloud_strategy.state,
                                   subcloud_strategy.build_phase.reason))
                raise Exception(
                    "VIM strategy build failed: %s. %s."
                    % (subcloud_strategy.state, subcloud_strategy.build_phase.reason)
                )
            elif subcloud_strategy.state == vim.STATE_BUILD_TIMEOUT:
                raise Exception("VIM strategy build timed out: %s."
                                % subcloud_strategy.state)
                raise Exception(
                    "VIM strategy build timed out: %s." % subcloud_strategy.state
                )
            else:
                raise Exception("VIM strategy unexpected build state: %s"
                                % subcloud_strategy.state)
                raise Exception(
                    "VIM strategy unexpected build state: %s" % subcloud_strategy.state
                )

        # Success, state machine can proceed to the next state
        return self.next_state

@ -32,7 +32,8 @@ class ApplyingVIMStrategyState(BaseState):
    def __init__(self, region_name):
        super(ApplyingVIMStrategyState, self).__init__(
            next_state=consts.STRATEGY_STATE_FINISHING_FW_UPDATE,
            region_name=region_name)
            region_name=region_name,
        )
        self.max_failed_queries = DEFAULT_MAX_FAILED_QUERIES
        self.wait_attempts = DEFAULT_MAX_WAIT_ATTEMPTS
        self.wait_interval = WAIT_INTERVAL
@ -54,8 +55,8 @@ class ApplyingVIMStrategyState(BaseState):
        # Do not raise the default exception if there is no strategy
        # because the default exception is unclear: ie: "Get strategy failed"
        subcloud_strategy = self.get_vim_client(region).get_strategy(
            strategy_name=vim.STRATEGY_NAME_FW_UPDATE,
            raise_error_if_missing=False)
            strategy_name=vim.STRATEGY_NAME_FW_UPDATE, raise_error_if_missing=False
        )

        if subcloud_strategy is None:
            self.info_log(strategy_step, "Skip. There is no strategy to apply")
@ -65,13 +66,15 @@ class ApplyingVIMStrategyState(BaseState):
        if subcloud_strategy.state == vim.STATE_READY_TO_APPLY:
            # An exception here will fail this state
            subcloud_strategy = self.get_vim_client(region).apply_strategy(
                strategy_name=vim.STRATEGY_NAME_FW_UPDATE)
                strategy_name=vim.STRATEGY_NAME_FW_UPDATE
            )
            if subcloud_strategy.state == vim.STATE_APPLYING:
                self.info_log(strategy_step, "VIM Strategy apply in progress")
            else:
                raise Exception("VIM strategy apply failed - "
                                "unexpected strategy state %s"
                                % subcloud_strategy.state)
                raise Exception(
                    "VIM strategy apply failed - unexpected strategy state %s"
                    % subcloud_strategy.state
                )

        # wait for the new strategy to apply or an existing strategy.
        # Loop until the strategy applies. Repeatedly query the API
@ -101,7 +104,8 @@ class ApplyingVIMStrategyState(BaseState):
            try:
                subcloud_strategy = self.get_vim_client(region).get_strategy(
                    strategy_name=vim.STRATEGY_NAME_FW_UPDATE,
                    raise_error_if_missing=False)
                    raise_error_if_missing=False,
                )
                get_fail_count = 0
            except Exception:
                # When applying the strategy to a subcloud, the VIM can
@ -111,11 +115,13 @@ class ApplyingVIMStrategyState(BaseState):
                get_fail_count += 1
                if get_fail_count >= self.max_failed_queries:
                    # We have waited too long.
                    raise Exception("Timeout during recovery of apply "
                                    "firmware strategy.")
                self.debug_log(strategy_step,
                               "Unable to get firmware strategy - "
                               "attempt %d" % get_fail_count)
                    raise Exception(
                        "Timeout during recovery of apply firmware strategy."
                    )
                self.debug_log(
                    strategy_step,
                    "Unable to get firmware strategy - attempt %d" % get_fail_count,
                )
                continue
            # The loop gets here if the API is able to respond
            # Check if the strategy no longer exists. This should not happen.
@ -123,34 +129,38 @@ class ApplyingVIMStrategyState(BaseState):
                raise Exception("Firmware strategy disappeared while applying")
            elif subcloud_strategy.state == vim.STATE_APPLYING:
                # Still applying. Update details if it has changed
                new_details = ("%s phase is %s%% complete" % (
                new_details = "%s phase is %s%% complete" % (
                    subcloud_strategy.current_phase,
                    subcloud_strategy.current_phase_completion_percentage))
                    subcloud_strategy.current_phase_completion_percentage,
                )
                if new_details != last_details:
                    # Progress is being made.
                    # Reset the counter and log the progress
                    last_details = new_details
                    wait_count = 0
                    self.info_log(strategy_step, new_details)
                    db_api.strategy_step_update(self.context,
                                                strategy_step.subcloud_id,
                                                details=new_details)
                    db_api.strategy_step_update(
                        self.context, strategy_step.subcloud_id, details=new_details
                    )
            elif subcloud_strategy.state == vim.STATE_APPLIED:
                # Success. Break out of loop
                self.info_log(strategy_step,
                              "Firmware strategy has been applied")
                self.info_log(strategy_step, "Firmware strategy has been applied")
                break
            elif subcloud_strategy.state in [vim.STATE_APPLY_FAILED,
                                             vim.STATE_APPLY_TIMEOUT]:
            elif subcloud_strategy.state in [
                vim.STATE_APPLY_FAILED,
                vim.STATE_APPLY_TIMEOUT,
            ]:
                # Explicit known failure states
                raise Exception("Firmware strategy apply failed. %s. %s"
                                % (subcloud_strategy.state,
                                   subcloud_strategy.apply_phase.reason))
                raise Exception(
                    "Firmware strategy apply failed. %s. %s"
                    % (subcloud_strategy.state, subcloud_strategy.apply_phase.reason)
                )
            else:
                # Other states are bad
                raise Exception("Firmware strategy apply failed. "
                                "Unexpected State: %s."
                                % subcloud_strategy.state)
                raise Exception(
                    "Firmware strategy apply failed. Unexpected State: %s."
                    % subcloud_strategy.state
                )
        # end of loop

        # Success, state machine can proceed to the next state

@ -23,7 +23,8 @@ class CreatingVIMStrategyState(BaseState):
    def __init__(self, region_name):
        super(CreatingVIMStrategyState, self).__init__(
            next_state=consts.STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY,
            region_name=region_name)
            region_name=region_name,
        )
        # max time to wait for the strategy to be built (in seconds)
        # is: sleep_duration * max_queries
        self.sleep_duration = DEFAULT_SLEEP_DURATION
@ -34,24 +35,25 @@ class CreatingVIMStrategyState(BaseState):

        # Get the update options
        opts_dict = dcmanager_utils.get_sw_update_opts(
            self.context,
            for_sw_update=True,
            subcloud_id=strategy_step.subcloud_id)
            self.context, for_sw_update=True, subcloud_id=strategy_step.subcloud_id
        )

        # Call the API to build the firmware strategy
        # max-parallel-workers cannot be less than 2 or greater than 5
        subcloud_strategy = self.get_vim_client(region).create_strategy(
            vim.STRATEGY_NAME_FW_UPDATE,
            opts_dict['storage-apply-type'],
            opts_dict['worker-apply-type'],
            opts_dict["storage-apply-type"],
            opts_dict["worker-apply-type"],
            2,  # opts_dict['max-parallel-workers'],
            opts_dict['default-instance-action'],
            opts_dict['alarm-restriction-type'])
            opts_dict["default-instance-action"],
            opts_dict["alarm-restriction-type"],
        )

        # a successful API call to create MUST set the state be 'building'
        if subcloud_strategy.state != vim.STATE_BUILDING:
            raise Exception("Unexpected VIM strategy build state: %s"
                            % subcloud_strategy.state)
            raise Exception(
                "Unexpected VIM strategy build state: %s" % subcloud_strategy.state
            )
        return subcloud_strategy

    def perform_state_action(self, strategy_step):
@ -67,34 +69,39 @@ class CreatingVIMStrategyState(BaseState):

        # Get the existing firmware strategy, which may be None
        subcloud_strategy = self.get_vim_client(region).get_strategy(
            strategy_name=vim.STRATEGY_NAME_FW_UPDATE,
            raise_error_if_missing=False)
            strategy_name=vim.STRATEGY_NAME_FW_UPDATE, raise_error_if_missing=False
        )

        if subcloud_strategy is None:
            subcloud_strategy = self._create_vim_strategy(strategy_step,
                                                          region)
            subcloud_strategy = self._create_vim_strategy(strategy_step, region)
        else:
            self.info_log(strategy_step,
                          "FW VIM strategy already exists with state: %s"
                          % subcloud_strategy.state)
            self.info_log(
                strategy_step,
                "FW VIM strategy already exists with state: %s"
                % subcloud_strategy.state,
            )
            # if a strategy exists in building/applying/aborting do not delete
            # it and instead raise an exception
            if subcloud_strategy.state in [vim.STATE_BUILDING,
                                           vim.STATE_APPLYING,
                                           vim.STATE_ABORTING]:
            if subcloud_strategy.state in [
                vim.STATE_BUILDING,
                vim.STATE_APPLYING,
                vim.STATE_ABORTING,
            ]:
                # Can't delete a strategy in these states
                message = ("Failed to create a VIM strategy for %s. "
                           "There already is an existing strategy in %s state"
                           % (region, subcloud_strategy.state))
                message = (
                    "Failed to create a VIM strategy for %s. "
                    "There already is an existing strategy in %s state"
                    % (region, subcloud_strategy.state)
                )
                self.warn_log(strategy_step, message)
                raise Exception(message)

            # if strategy exists in any other type of state, delete and create
            self.info_log(strategy_step, "Deleting existing FW VIM strategy")
            self.get_vim_client(region).delete_strategy(
                strategy_name=vim.STRATEGY_NAME_FW_UPDATE)
            subcloud_strategy = self._create_vim_strategy(strategy_step,
                                                          region)
                strategy_name=vim.STRATEGY_NAME_FW_UPDATE
            )
            subcloud_strategy = self._create_vim_strategy(strategy_step, region)

        # A strategy already exists, or is being built
        # Loop until the strategy is done building Repeatedly query the API
@ -104,15 +111,16 @@ class CreatingVIMStrategyState(BaseState):
            if self.stopped():
                raise StrategyStoppedException()
            if counter >= self.max_queries:
                raise Exception("Timeout building vim strategy. state: %s"
                                % subcloud_strategy.state)
                raise Exception(
                    "Timeout building vim strategy. state: %s" % subcloud_strategy.state
                )
            counter += 1
            time.sleep(self.sleep_duration)

            # query the vim strategy to see if it is in the new state
            subcloud_strategy = self.get_vim_client(region).get_strategy(
                strategy_name=vim.STRATEGY_NAME_FW_UPDATE,
                raise_error_if_missing=True)
                strategy_name=vim.STRATEGY_NAME_FW_UPDATE, raise_error_if_missing=True
            )
            if subcloud_strategy.state == vim.STATE_READY_TO_APPLY:
                self.info_log(strategy_step, "VIM strategy has been built")
                break
@ -120,15 +128,18 @@ class CreatingVIMStrategyState(BaseState):
                # This is the expected state while creating the strategy
                pass
            elif subcloud_strategy.state == vim.STATE_BUILD_FAILED:
                raise Exception("VIM strategy build failed: %s. %s."
                                % (subcloud_strategy.state,
                                   subcloud_strategy.build_phase.reason))
                raise Exception(
                    "VIM strategy build failed: %s. %s."
                    % (subcloud_strategy.state, subcloud_strategy.build_phase.reason)
                )
            elif subcloud_strategy.state == vim.STATE_BUILD_TIMEOUT:
                raise Exception("VIM strategy build timed out: %s."
                                % subcloud_strategy.state)
                raise Exception(
                    "VIM strategy build timed out: %s." % subcloud_strategy.state
                )
            else:
                raise Exception("VIM strategy unexpected build state: %s"
                                % subcloud_strategy.state)
                raise Exception(
                    "VIM strategy unexpected build state: %s" % subcloud_strategy.state
                )

        # Success, state machine can proceed to the next state
        return self.next_state

@ -24,15 +24,20 @@ class FinishingFwUpdateState(BaseState):

    def __init__(self, region_name):
        super(FinishingFwUpdateState, self).__init__(
            next_state=consts.STRATEGY_STATE_COMPLETE, region_name=region_name)
            next_state=consts.STRATEGY_STATE_COMPLETE, region_name=region_name
        )
        self.max_failed_queries = DEFAULT_MAX_FAILED_QUERIES
        self.failed_sleep_duration = DEFAULT_FAILED_SLEEP

    def align_subcloud_status(self, strategy_step):
        self.info_log(strategy_step,
                      "Setting endpoint status of %s to %s"
                      % (dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
                         dccommon_consts.SYNC_STATUS_IN_SYNC))
        self.info_log(
            strategy_step,
            "Setting endpoint status of %s to %s"
            % (
                dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
                dccommon_consts.SYNC_STATUS_IN_SYNC,
            ),
        )
        dcmanager_state_rpc_client = dcmanager_rpc_client.SubcloudStateClient()
        # The subcloud name may differ from the region name in the strategy_step
        dcmanager_state_rpc_client.update_subcloud_endpoint_status(
@ -40,7 +45,8 @@ class FinishingFwUpdateState(BaseState):
            subcloud_name=self.get_subcloud_name(strategy_step),
            subcloud_region=self.get_region_name(strategy_step),
            endpoint_type=dccommon_consts.ENDPOINT_TYPE_FIRMWARE,
            sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC)
            sync_status=dccommon_consts.SYNC_STATUS_IN_SYNC,
        )

    def perform_state_action(self, strategy_step):
        """Finish the firmware update.
@ -71,8 +77,9 @@ class FinishingFwUpdateState(BaseState):
        try:
            subcloud_hosts = self.get_sysinv_client(region).get_hosts()
            for host in subcloud_hosts:
                host_devices = self.get_sysinv_client(
                    region).get_host_device_list(host.uuid)
                host_devices = self.get_sysinv_client(region).get_host_device_list(
                    host.uuid
                )
                for device in host_devices:
                    if device.enabled:
                        enabled_host_device_list.append(device)
@ -99,12 +106,13 @@ class FinishingFwUpdateState(BaseState):
            try:
                # determine list of applied subcloud images
                subcloud_images = self.get_sysinv_client(region).get_device_images()
                applied_subcloud_images = \
                    utils.filter_applied_images(subcloud_images,
                                                expected_value=True)
                applied_subcloud_images = utils.filter_applied_images(
                    subcloud_images, expected_value=True
                )
                # Retrieve the device image states on this subcloud.
                subcloud_device_image_states = self.get_sysinv_client(
                    region).get_device_image_states()
                    region
                ).get_device_image_states()
                break
            except Exception:
                # TODO(rlima): Invert the fail counter with the validation to fix
@ -112,7 +120,8 @@ class FinishingFwUpdateState(BaseState):
                # DEFAULT_MAX_FAILED_QUERIES
                if fail_counter >= self.max_failed_queries:
                    raise Exception(
                        "Timeout waiting to query subcloud device image info")
                        "Timeout waiting to query subcloud device image info"
                    )
                fail_counter += 1
                time.sleep(self.failed_sleep_duration)

@ -127,9 +136,9 @@ class FinishingFwUpdateState(BaseState):
            if device is not None:
                image = image_map.get(device_image_state_obj.image_uuid)
                if image is not None:
                    self.info_log(strategy_step,
                                  "Failed apply: %s"
                                  % device_image_state_obj)
                    self.info_log(
                        strategy_step, "Failed apply: %s" % device_image_state_obj
                    )
                    failed_states.append(device_image_state_obj)
        if failed_states:
            # todo(abailey): create a custom Exception

@ -15,14 +15,15 @@ from dcmanager.orchestrator.states.firmware import utils
class ImportingFirmwareState(BaseState):
    """State for importing firmware

       Query the device-images on the system controller that are 'pending'
       Ensure those device images are uploaded on the subcloud.
    Query the device-images on the system controller that are 'pending'
    Ensure those device images are uploaded on the subcloud.
    """

    def __init__(self, region_name):
        super(ImportingFirmwareState, self).__init__(
            next_state=consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY,
            region_name=region_name)
            region_name=region_name,
        )

    def _image_in_list(self, image, image_list):
        # todo(abailey): FUTURE. There may be other ways that two images can
@ -46,33 +47,32 @@ class ImportingFirmwareState(BaseState):

        # ============== query system controller images ==============
        system_controller_images = self.get_sysinv_client(
            dccommon_consts.DEFAULT_REGION_NAME).get_device_images()
            dccommon_consts.DEFAULT_REGION_NAME
        ).get_device_images()
        # determine list of applied system controller images
        applied_system_controller_images = \
            utils.filter_applied_images(system_controller_images,
                                        expected_value=True)
        applied_system_controller_images = utils.filter_applied_images(
            system_controller_images, expected_value=True
        )

        # ============== query subcloud images ========================
        region = self.get_region_name(strategy_step)
        subcloud_images = self.get_sysinv_client(
            region).get_device_images()
        subcloud_images = self.get_sysinv_client(region).get_device_images()
        # determine list of applied subcloud images
        applied_subcloud_images = \
            utils.filter_applied_images(subcloud_images,
                                        expected_value=True)
        applied_subcloud_images = utils.filter_applied_images(
            subcloud_images, expected_value=True
        )

        subcloud_device_label_list = self.get_sysinv_client(
            region).get_device_label_list()
            region
        ).get_device_label_list()

        subcloud_labels = []
        for device_label in subcloud_device_label_list:
            subcloud_labels.append({device_label.label_key:
                                    device_label.label_value})
            subcloud_labels.append({device_label.label_key: device_label.label_value})
        # - remove any applied images in subcloud that are not applied on the
        # system controller
        for image in applied_subcloud_images:
            if not self._image_in_list(image,
                                       applied_system_controller_images):
            if not self._image_in_list(image, applied_system_controller_images):
                # the applied image in the subcloud is not in the system
                # controller applied list, and should be removed
                # Use the existing labels on the image for the remove
@ -81,19 +81,19 @@ class ImportingFirmwareState(BaseState):
                    # Do not append an empty dictionary
                    if label:
                        labels.append(label)
                self.info_log(strategy_step,
                              "Remove Image %s by labels: %s" % (image.uuid,
                                                                 str(labels)))
                self.get_sysinv_client(region).remove_device_image(
                    image.uuid,
                    labels)
                self.info_log(
                    strategy_step,
                    "Remove Image %s by labels: %s" % (image.uuid, str(labels)),
                )
                self.get_sysinv_client(region).remove_device_image(image.uuid, labels)

        # get the list of enabled devices on the subcloud
        enabled_host_device_list = []
        subcloud_hosts = self.get_sysinv_client(region).get_hosts()
        for host in subcloud_hosts:
            host_devices = self.get_sysinv_client(
                region).get_host_device_list(host.uuid)
            host_devices = self.get_sysinv_client(region).get_host_device_list(
                host.uuid
            )
            for device in host_devices:
                if device.enabled:
                    enabled_host_device_list.append(device)
@ -101,48 +101,50 @@ class ImportingFirmwareState(BaseState):
        if not enabled_host_device_list:
            # There are no enabled devices in this subcloud, so break out
            # of this handler, since there will be nothing to upload or apply
            self.info_log(strategy_step,
                          "No enabled devices. Skipping upload and apply.")
            self.info_log(
                strategy_step, "No enabled devices. Skipping upload and apply."
            )
            return self.next_state

        # Retrieve the device image states on this subcloud.
        subcloud_device_image_states = self.get_sysinv_client(
            region).get_device_image_states()
            region
        ).get_device_image_states()

        # go through the applied images on system controller
        # any of the images that correspond to an enabled device on the
        # subcloud should be uploaded and applied if it does not exist
        for image in applied_system_controller_images:
            device = utils.check_subcloud_device_has_image(
                image,
                enabled_host_device_list,
                subcloud_device_label_list)
                image, enabled_host_device_list, subcloud_device_label_list
            )
            if device is not None:
                # there was a matching device for that image
                # We need to upload it if it does not exist yet
                if not self._image_in_list(image, subcloud_images):
                    self.info_log(strategy_step,
                                  "Uploading image:%s " % image.uuid)
                    self.info_log(strategy_step, "Uploading image: %s " % image.uuid)
                    bitstreamfile = utils.determine_image_file(image)
                    if not os.path.isfile(bitstreamfile):
                        # We could not find the file in the vault
                        raise Exception("File does not exist: %s"
                                        % bitstreamfile)
                        raise Exception("File does not exist: %s" % bitstreamfile)
                    fields = utils.determine_image_fields(image)
                    new_image_response = self.get_sysinv_client(
                        region).upload_device_image(bitstreamfile, fields)
                    self.debug_log(strategy_step,
                                   "Upload device image returned: %s"
                                   % str(new_image_response))
                    self.info_log(strategy_step,
                                  "Uploaded image:%s " % image.uuid)
                        region
                    ).upload_device_image(bitstreamfile, fields)
                    self.debug_log(
                        strategy_step,
                        "Upload device image returned: %s" % str(new_image_response),
                    )
                    self.info_log(strategy_step, "Uploaded image: %s" % image.uuid)

                # The image exists on the subcloud
                # However, it may not have been applied to this device
                device_image_state = None
                for device_image_state_obj in subcloud_device_image_states:
                    if device_image_state_obj.pcidevice_uuid == device.uuid \
                            and device_image_state_obj.image_uuid == image.uuid:
                    if (
                        device_image_state_obj.pcidevice_uuid == device.uuid
                        and device_image_state_obj.image_uuid == image.uuid
                    ):
                        device_image_state = device_image_state_obj
                        break
                else:
@ -156,29 +158,37 @@ class ImportingFirmwareState(BaseState):
                        # Do not append an empty dictionary
                        if label:
                            labels.append(label)
                    self.info_log(strategy_step,
                                  "Applying device image:%s with labels:%s"
                                  % (image.uuid, str(labels)))
                    self.info_log(
                        strategy_step,
                        "Applying device image: %s with labels: %s"
                        % (image.uuid, str(labels)),
                    )

                    apply_response = self.get_sysinv_client(
                        region).apply_device_image(image.uuid, labels=labels)
                    self.debug_log(strategy_step,
                                   "Apply device image returned: %s"
                                   % str(apply_response))
                    self.info_log(strategy_step,
                                  "Applied image:%s with labels:%s"
                                  % (image.uuid, str(labels)))
                    apply_response = self.get_sysinv_client(region).apply_device_image(
                        image.uuid, labels=labels
                    )
                    self.debug_log(
                        strategy_step,
                        "Apply device image returned: %s" % str(apply_response),
                    )
                    self.info_log(
                        strategy_step,
                        "Applied image:%s with labels: %s" % (image.uuid, str(labels)),
                    )
                    continue

                # We have a device_image_state. Lets examine the apply status
                if device_image_state.status != utils.DEVICE_IMAGE_UPDATE_COMPLETED:
                    self.info_log(strategy_step,
                                  "Image:%s has not been written. State:%s"
                                  % (image.uuid, device_image_state.status))
                    self.info_log(
                        strategy_step,
                        "Image:%s has not been written. State: %s"
                        % (image.uuid, device_image_state.status),
                    )
                else:
                    self.info_log(strategy_step,
                                  "Skipping already applied image:%s "
                                  % image.uuid)
                    self.info_log(
                        strategy_step,
                        "Skipping already applied image: %s " % image.uuid,
                    )

        # If none of those API calls failed, this state was successful
        # Success, state machine can proceed to the next state

@ -7,12 +7,12 @@
import os

# Device Image Status - duplicated from sysinv/common/device.py
DEVICE_IMAGE_UPDATE_PENDING = 'pending'
DEVICE_IMAGE_UPDATE_IN_PROGRESS = 'in-progress'
DEVICE_IMAGE_UPDATE_IN_PROGRESS_ABORTED = 'in-progress-aborted'
DEVICE_IMAGE_UPDATE_COMPLETED = 'completed'
DEVICE_IMAGE_UPDATE_FAILED = 'failed'
DEVICE_IMAGE_UPDATE_NULL = ''
DEVICE_IMAGE_UPDATE_PENDING = "pending"
DEVICE_IMAGE_UPDATE_IN_PROGRESS = "in-progress"
DEVICE_IMAGE_UPDATE_IN_PROGRESS_ABORTED = "in-progress-aborted"
DEVICE_IMAGE_UPDATE_COMPLETED = "completed"
DEVICE_IMAGE_UPDATE_FAILED = "failed"
DEVICE_IMAGE_UPDATE_NULL = ""


# convert a list of objects that have a uuid field, into a map keyed on uuid
@ -26,9 +26,9 @@ def to_uuid_map(list_with_uuids):
# todo(abailey) refactor based on firmware_audit code for
# _check_subcloud_device_has_image
# THIS METHOD should be renamed !!
def check_subcloud_device_has_image(image,
                                    enabled_host_device_list,
                                    subcloud_device_label_list):
def check_subcloud_device_has_image(
    image, enabled_host_device_list, subcloud_device_label_list
):
    """Return device on subcloud that matches the image, or None"""

    apply_to_all_devices = False
@ -52,10 +52,8 @@ def check_subcloud_device_has_image(image,
                label_key = list(image_label.keys())[0]
                label_value = image_label.get(label_key)
                is_device_eligible = check_for_label_match(
                    subcloud_device_label_list,
                    device.uuid,
                    label_key,
                    label_value)
                    subcloud_device_label_list, device.uuid, label_key, label_value
                )
                # If device label matches any image label stop checking
                # for any other label matches and do pci comparison below
                if is_device_eligible:
@ -66,8 +64,10 @@ def check_subcloud_device_has_image(image,
                continue

        # We found an eligible device
        if image.pci_vendor == device.pvendor_id and \
                image.pci_device == device.pdevice_id:
        if (
            image.pci_vendor == device.pvendor_id
            and image.pci_device == device.pdevice_id
        ):
            return device

    # no matching devices
@ -76,50 +76,54 @@ def check_subcloud_device_has_image(image,

# todo(abailey): refactor with https://review.opendev.org/#/c/741515
def get_device_image_filename(resource):
    filename = "{}-{}-{}-{}.bit".format(resource.bitstream_type,
                                        resource.pci_vendor,
                                        resource.pci_device,
                                        resource.uuid)
    filename = "{}-{}-{}-{}.bit".format(
        resource.bitstream_type, resource.pci_vendor, resource.pci_device, resource.uuid
    )
    return filename


# todo(abailey): use constant from https://review.opendev.org/#/c/741515
def determine_image_file(image):
    """Find the bitstream file for an image in the vault"""
    DEVICE_IMAGE_VAULT_DIR = '/opt/dc-vault/device_images'
    return os.path.join(DEVICE_IMAGE_VAULT_DIR,
                        get_device_image_filename(image))
    DEVICE_IMAGE_VAULT_DIR = "/opt/dc-vault/device_images"
    return os.path.join(DEVICE_IMAGE_VAULT_DIR, get_device_image_filename(image))


def determine_image_fields(image):
    """Return the appropriate upload fields for an image"""
    field_list = ['uuid',
                  'bitstream_type',
                  'pci_vendor',
                  'pci_device',
                  'bitstream_id',
                  'key_signature',
                  'revoke_key_id',
                  'name',
                  'description',
                  'image_version',
                  'bmc',
                  'retimer_included']
    fields = dict((k, str(v)) for (k, v) in vars(image).items()
                  if k in field_list and v is not None)
    field_list = [
        "uuid",
        "bitstream_type",
        "pci_vendor",
        "pci_device",
        "bitstream_id",
        "key_signature",
        "revoke_key_id",
        "name",
        "description",
        "image_version",
        "bmc",
        "retimer_included",
    ]
    fields = dict(
        (k, str(v))
        for (k, v) in vars(image).items()
        if k in field_list and v is not None
    )
    return fields


def check_for_label_match(subcloud_host_device_label_list,
                          device_uuid,
                          label_key,
                          label_value):
def check_for_label_match(
    subcloud_host_device_label_list, device_uuid, label_key, label_value
):
    # todo(abailey): should this compare pci_device_uuid or vendor/device
    for device_label in subcloud_host_device_label_list:
        if device_label.pcidevice_uuid and \
                device_uuid == device_label.pcidevice_uuid and \
                label_key == device_label.label_key and \
                label_value == device_label.label_value:
        if (
            device_label.pcidevice_uuid
            and device_uuid == device_label.pcidevice_uuid
            and label_key == device_label.label_key
            and label_value == device_label.label_value
        ):
            return True
    return False

@ -127,7 +131,7 @@ def check_for_label_match(subcloud_host_device_label_list,
def filter_applied_images(device_images, expected_value=True):
    """Filter a list of DeviceImage objects by the 'applied' field

       Returns list of images that have 'applied' field matching expected_value
    Returns list of images that have 'applied' field matching expected_value
    """
    filtered_images = []
    for device_image in device_images:

@ -1,12 +1,11 @@
#
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.orchestrator.states.applying_vim_strategy \
    import ApplyingVIMStrategyState
from dcmanager.orchestrator.states.applying_vim_strategy import ApplyingVIMStrategyState


class ApplyingVIMKubeUpgradeStrategyState(ApplyingVIMStrategyState):
@ -16,4 +15,5 @@ class ApplyingVIMKubeUpgradeStrategyState(ApplyingVIMStrategyState):
        super(ApplyingVIMKubeUpgradeStrategyState, self).__init__(
            next_state=consts.STRATEGY_STATE_COMPLETE,
            region_name=region_name,
            strategy_name=vim.STRATEGY_NAME_KUBE_UPGRADE)
            strategy_name=vim.STRATEGY_NAME_KUBE_UPGRADE,
        )

@ -7,32 +7,30 @@
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.common import utils as dcmanager_utils
from dcmanager.orchestrator.cache.cache_specifications import \
    REGION_ONE_KUBERNETES_CACHE_TYPE
from dcmanager.orchestrator.states.creating_vim_strategy \
    import CreatingVIMStrategyState
from dcmanager.orchestrator.cache.cache_specifications import (
    REGION_ONE_KUBERNETES_CACHE_TYPE,
)
from dcmanager.orchestrator.states.creating_vim_strategy import CreatingVIMStrategyState


class CreatingVIMKubeUpgradeStrategyState(CreatingVIMStrategyState):
    """State for creating the VIM upgrade strategy."""

    def __init__(self, region_name):
        next_state = \
            consts.STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY
        next_state = consts.STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY
        super(CreatingVIMKubeUpgradeStrategyState, self).__init__(
            next_state=next_state,
            region_name=region_name,
            strategy_name=vim.STRATEGY_NAME_KUBE_UPGRADE)
            strategy_name=vim.STRATEGY_NAME_KUBE_UPGRADE,
        )

    def _create_vim_strategy(self, strategy_step, region):
        self.info_log(strategy_step,
                      "Creating (%s) VIM strategy" % self.strategy_name)
        self.info_log(strategy_step, "Creating (%s) VIM strategy" % self.strategy_name)
        target_kube_version = None

        # If there is an existing kube upgrade object, its to_version is used
        # This is to allow resume for a kube upgrade
        subcloud_kube_upgrades = \
            self.get_sysinv_client(region).get_kube_upgrades()
        subcloud_kube_upgrades = self.get_sysinv_client(region).get_kube_upgrades()
        if len(subcloud_kube_upgrades) > 0:
            target_kube_version = subcloud_kube_upgrades[0].to_version
        else:
@ -43,10 +41,11 @@ class CreatingVIMKubeUpgradeStrategyState(CreatingVIMStrategyState):
            # The following chooses to_version using the same logic as in
            # KubeUpgradePreCheckState.perform_state_action()
            extra_args = dcmanager_utils.get_sw_update_strategy_extra_args(
                self.context, update_type=consts.SW_UPDATE_TYPE_KUBERNETES)
                self.context, update_type=consts.SW_UPDATE_TYPE_KUBERNETES
            )
            if extra_args is None:
                extra_args = {}
            to_version = extra_args.get('to-version', None)
            to_version = extra_args.get("to-version", None)
            if to_version is None:
                sys_kube_versions = self._read_from_cache(
                    REGION_ONE_KUBERNETES_CACHE_TYPE
@ -59,31 +58,30 @@ class CreatingVIMKubeUpgradeStrategyState(CreatingVIMStrategyState):
                self.warn_log(strategy_step, message)
                raise Exception(message)

            kube_versions = \
                self.get_sysinv_client(region).get_kube_versions()
            target_kube_version = \
                dcmanager_utils.select_available_kube_version(
                    kube_versions, to_version
                )
            kube_versions = self.get_sysinv_client(region).get_kube_versions()
            target_kube_version = dcmanager_utils.select_available_kube_version(
                kube_versions, to_version
            )

        # Get the update options
        opts_dict = dcmanager_utils.get_sw_update_opts(
            self.context,
            for_sw_update=True,
            subcloud_id=strategy_step.subcloud_id)
            self.context, for_sw_update=True, subcloud_id=strategy_step.subcloud_id
        )

        # Call the API to build the VIM strategy
        subcloud_strategy = self.get_vim_client(region).create_strategy(
            self.strategy_name,
            opts_dict['storage-apply-type'],
            opts_dict['worker-apply-type'],
            opts_dict['max-parallel-workers'],
            opts_dict['default-instance-action'],
            opts_dict['alarm-restriction-type'],
            to_version=target_kube_version)
            opts_dict["storage-apply-type"],
            opts_dict["worker-apply-type"],
            opts_dict["max-parallel-workers"],
            opts_dict["default-instance-action"],
            opts_dict["alarm-restriction-type"],
            to_version=target_kube_version,
        )

        # a successful API call to create MUST set the state be 'building'
        if subcloud_strategy.state != vim.STATE_BUILDING:
            raise Exception("Unexpected VIM strategy build state: %s"
                            % subcloud_strategy.state)
            raise Exception(
                "Unexpected VIM strategy build state: %s" % subcloud_strategy.state
            )
        return subcloud_strategy

@ -8,19 +8,20 @@ import re

from dcmanager.common.consts import ERROR_DESC_CMD
from dcmanager.common.consts import STRATEGY_STATE_COMPLETE
from dcmanager.common.consts \
    import STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
from dcmanager.common.consts import (
    STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY,
)
from dcmanager.common import utils
from dcmanager.db import api as db_api
from dcmanager.orchestrator.cache.cache_specifications import \
    REGION_ONE_KUBERNETES_CACHE_TYPE
from dcmanager.orchestrator.cache.cache_specifications import (
    REGION_ONE_KUBERNETES_CACHE_TYPE,
)
from dcmanager.orchestrator.states.base import BaseState

# These following alarms can occur during a vim orchestrated k8s upgrade on the
# subcloud. By ignoring the alarms, subcloud k8s upgrade can be
# retried after a failure using DC orchestrator.
ALARM_IGNORE_LIST = ['100.003', '200.001', '700.004', '750.006',
                     '900.007', '900.401']
ALARM_IGNORE_LIST = ["100.003", "200.001", "700.004", "750.006", "900.007", "900.401"]


class KubeUpgradePreCheckState(BaseState):
@ -29,7 +30,8 @@ class KubeUpgradePreCheckState(BaseState):
    def __init__(self, region_name):
        super(KubeUpgradePreCheckState, self).__init__(
            next_state=STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY,
            region_name=region_name)
            region_name=region_name,
        )

    def perform_state_action(self, strategy_step):
        """This state will determine the starting state for kube upgrade
@ -48,42 +50,57 @@ class KubeUpgradePreCheckState(BaseState):
        a partially upgraded subcloud to be skipped.
        """
        system_health = self.get_sysinv_client(
            self.region_name).get_kube_upgrade_health()
        fails = re.findall("\[Fail\]", system_health)
        failed_alarm_check = re.findall("No alarms: \[Fail\]", system_health)
        no_mgmt_alarms = re.findall("\[0\] of which are management affecting",
                                    system_health)
            self.region_name
        ).get_kube_upgrade_health()
        fails = re.findall(r"\[Fail\]", system_health)
        failed_alarm_check = re.findall(r"No alarms: \[Fail\]", system_health)
        no_mgmt_alarms = re.findall(
            r"\[0\] of which are management affecting", system_health
        )
        if not fails or (len(fails) == 1 and failed_alarm_check and no_mgmt_alarms):
            self.info_log(strategy_step, "Kubernetes upgrade health check passed.")
        elif (len(fails) == 1 and failed_alarm_check):
        elif len(fails) == 1 and failed_alarm_check:
            alarms = self.get_fm_client(self.region_name).get_alarms()
            for alarm in alarms:
                if alarm.alarm_id not in ALARM_IGNORE_LIST:
                    if alarm.mgmt_affecting == "True":
                        error_desc_msg = (
                            "Kubernetes upgrade health check failed due to alarm "
                            "%s. Kubernetes upgrade health: \n %s" % (
                                alarm.alarm_id, system_health))
                            "Kubernetes upgrade health check failed due to alarm %s. "
                            "Kubernetes upgrade health: \n %s"
                            % (alarm.alarm_id, system_health)
                        )
                        db_api.subcloud_update(
                            self.context, strategy_step.subcloud_id,
                            error_description=error_desc_msg)
                            self.context,
                            strategy_step.subcloud_id,
                            error_description=error_desc_msg,
                        )
                        self.error_log(strategy_step, "\n" + system_health)
                        raise Exception((
                            "Kubernetes upgrade health check failed due to alarm "
                            "%s. Please run 'system health-query-kube-upgrade' "
                            "command on the subcloud or %s on central for details." %
                            (alarm.alarm_id, ERROR_DESC_CMD)))
                        raise Exception(
                            (
                                "Kubernetes upgrade health check failed due to alarm "
                                "%s. Please run 'system health-query-kube-upgrade' "
                                "command on the subcloud or %s on central for details."
                                % (alarm.alarm_id, ERROR_DESC_CMD)
                            )
                        )
        else:
            error_desc_msg = ("Kubernetes upgrade health check failed. \n %s" %
                              system_health)
            error_desc_msg = (
                "Kubernetes upgrade health check failed. \n %s" % system_health
            )
            self.error_log(strategy_step, "\n" + system_health)
            db_api.subcloud_update(
                self.context, strategy_step.subcloud_id,
                error_description=error_desc_msg)
            raise Exception(("Kubernetes upgrade health check failed. "
                             "Please run 'system health-query-kube-upgrade' "
                             "command on the subcloud or %s on central for details"
                             % (ERROR_DESC_CMD)))
                self.context,
                strategy_step.subcloud_id,
                error_description=error_desc_msg,
            )
            raise Exception(
                (
                    "Kubernetes upgrade health check failed. "
                    "Please run 'system health-query-kube-upgrade' "
                    "command on the subcloud or %s on central for details"
                    % (ERROR_DESC_CMD)
                )
            )

        # check extra_args for the strategy
        # if there is a to-version, use that when checking against the subcloud
@ -92,7 +109,7 @@ class KubeUpgradePreCheckState(BaseState):
        extra_args = utils.get_sw_update_strategy_extra_args(self.context)
        if extra_args is None:
            extra_args = {}
        to_version = extra_args.get('to-version', None)
        to_version = extra_args.get("to-version", None)
        if to_version is None:
            sys_kube_versions = self._read_from_cache(REGION_ONE_KUBERNETES_CACHE_TYPE)
            to_version = utils.get_active_kube_version(sys_kube_versions)
@ -106,24 +123,27 @@ class KubeUpgradePreCheckState(BaseState):
        # Get any existing kubernetes upgrade operation in the subcloud,
        # and use its to-version rather than the 'available' version for
        # determining whether or not to skip.
        subcloud_kube_upgrades = \
            self.get_sysinv_client(self.region_name).get_kube_upgrades()
        subcloud_kube_upgrades = self.get_sysinv_client(
            self.region_name
        ).get_kube_upgrades()
        if len(subcloud_kube_upgrades) > 0:
            target_version = subcloud_kube_upgrades[0].to_version
            self.debug_log(strategy_step,
                           "Pre-Check. Existing Kubernetes upgrade:(%s) exists"
                           % target_version)
            self.debug_log(
                strategy_step,
                "Pre-Check. Existing Kubernetes upgrade:(%s) exists" % target_version,
            )
        else:
            # The subcloud can only be upgraded to an 'available' version
            subcloud_kube_versions = \
                self.get_sysinv_client(self.region_name).get_kube_versions()
            target_version = \
                utils.select_available_kube_version(
                    subcloud_kube_versions, to_version
                )
            self.debug_log(strategy_step,
                           "Pre-Check. Available Kubernetes upgrade:(%s)"
                           % target_version)
            subcloud_kube_versions = self.get_sysinv_client(
                self.region_name
            ).get_kube_versions()
            target_version = utils.select_available_kube_version(
                subcloud_kube_versions, to_version
            )
            self.debug_log(
                strategy_step,
                "Pre-Check. Available Kubernetes upgrade:(%s)" % target_version,
            )

        # For the to-version, the code currently allows a partial version
        # ie: v1.20 or a version that is much higher than is installed.
@ -158,14 +178,16 @@ class KubeUpgradePreCheckState(BaseState):
        if should_skip:
            # Add a log indicating we are skipping (and why)
            self.override_next_state(STRATEGY_STATE_COMPLETE)
            self.info_log(strategy_step,
                          "Pre-Check Skip. Orchestration To-Version:(%s). "
                          "Subcloud To-Version:(%s)"
                          % (to_version, target_version))
            self.info_log(
                strategy_step,
                "Pre-Check Skip. Orchestration To-Version:(%s). "
                "Subcloud To-Version:(%s)" % (to_version, target_version),
            )
        else:
            # Add a log indicating what we expect the next state to 'target'
            self.info_log(strategy_step,
                          "Pre-Check Pass. Orchestration To-Version:(%s). "
                          " Subcloud To-Version:(%s)"
                          % (to_version, target_version))
            self.info_log(
                strategy_step,
                "Pre-Check Pass. Orchestration To-Version:(%s). "
                "Subcloud To-Version:(%s)" % (to_version, target_version),
            )
        return self.next_state

@ -1,12 +1,11 @@
#
# Copyright (c) 2021-2023 Wind River Systems, Inc.
# Copyright (c) 2021-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.orchestrator.states.applying_vim_strategy \
    import ApplyingVIMStrategyState
from dcmanager.orchestrator.states.applying_vim_strategy import ApplyingVIMStrategyState


# Max time: 120 minutes = 120 queries x 60 seconds
@ -23,4 +22,5 @@ class ApplyingVIMKubeRootcaUpdateStrategyState(ApplyingVIMStrategyState):
            region_name=region_name,
            strategy_name=vim.STRATEGY_NAME_KUBE_ROOTCA_UPDATE,
            wait_attempts=KUBE_ROOTCA_UPDATE_MAX_WAIT_ATTEMPTS,
            wait_interval=KUBE_ROOTCA_UPDATE_WAIT_INTERVAL)
            wait_interval=KUBE_ROOTCA_UPDATE_WAIT_INTERVAL,
        )

@ -1,64 +1,62 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
# Copyright (c) 2021, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.common import utils as dcmanager_utils
from dcmanager.orchestrator.states.creating_vim_strategy \
    import CreatingVIMStrategyState
from dcmanager.orchestrator.states.creating_vim_strategy import CreatingVIMStrategyState


class CreatingVIMKubeRootcaUpdateStrategyState(CreatingVIMStrategyState):
    """State for creating the VIM Kube Root CA Update strategy."""

    def __init__(self, region_name):
        next_state = \
            consts.STRATEGY_STATE_APPLYING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
        next_state = consts.STRATEGY_STATE_APPLYING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
        super(CreatingVIMKubeRootcaUpdateStrategyState, self).__init__(
            next_state=next_state,
            region_name=region_name,
            strategy_name=vim.STRATEGY_NAME_KUBE_ROOTCA_UPDATE)
            strategy_name=vim.STRATEGY_NAME_KUBE_ROOTCA_UPDATE,
        )

    def _create_vim_strategy(self, strategy_step, region):
        self.info_log(strategy_step,
                      "Creating (%s) VIM strategy" % self.strategy_name)
        self.info_log(strategy_step, "Creating (%s) VIM strategy" % self.strategy_name)

        # This strategy supports the following additional kwargs.
        # cert_file
        # expiry_date
        # subject
        # These kwargs are retrieved from the extra_args of the strategy
        extra_args = \
            dcmanager_utils.get_sw_update_strategy_extra_args(self.context)
        extra_args = dcmanager_utils.get_sw_update_strategy_extra_args(self.context)
        if extra_args is None:
            extra_args = {}
        # Note that extra_args use "-" and not "_" in their keys
        cert_file = extra_args.get('cert-file', None)
        expiry_date = extra_args.get('expiry-date', None)
        subject = extra_args.get('subject', None)
        cert_file = extra_args.get("cert-file", None)
        expiry_date = extra_args.get("expiry-date", None)
        subject = extra_args.get("subject", None)

        # Get the update options
        opts_dict = dcmanager_utils.get_sw_update_opts(
            self.context,
            for_sw_update=True,
            subcloud_id=strategy_step.subcloud_id)
            self.context, for_sw_update=True, subcloud_id=strategy_step.subcloud_id
        )

        # Call the API to build the VIM strategy
        subcloud_strategy = self.get_vim_client(region).create_strategy(
            self.strategy_name,
            opts_dict['storage-apply-type'],
            opts_dict['worker-apply-type'],
            opts_dict['max-parallel-workers'],
            opts_dict['default-instance-action'],
            opts_dict['alarm-restriction-type'],
            opts_dict["storage-apply-type"],
            opts_dict["worker-apply-type"],
            opts_dict["max-parallel-workers"],
            opts_dict["default-instance-action"],
            opts_dict["alarm-restriction-type"],
            cert_file=cert_file,
            expiry_date=expiry_date,
            subject=subject)
            subject=subject,
        )

        # a successful API call to create MUST set the state be 'building'
        if subcloud_strategy.state != vim.STATE_BUILDING:
            raise Exception("Unexpected VIM strategy build state: %s"
                            % subcloud_strategy.state)
            raise Exception(
                "Unexpected VIM strategy build state: %s" % subcloud_strategy.state
            )
        return subcloud_strategy

@ -1,12 +1,12 @@
#
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.common.consts \
    import STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
from dcmanager.common.consts \
    import STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START
from dcmanager.common.consts import (
    STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
)
from dcmanager.common.consts import STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START
from dcmanager.common import utils as dcmanager_utils
from dcmanager.orchestrator.states.base import BaseState

@ -17,7 +17,8 @@ class KubeRootcaUpdatePreCheckState(BaseState):
    def __init__(self, region_name):
        super(KubeRootcaUpdatePreCheckState, self).__init__(
            next_state=STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
            region_name=region_name)
            region_name=region_name,
        )

    def perform_state_action(self, strategy_step):
        """This state will determine the starting state for kube rootca update
@ -27,11 +28,10 @@ class KubeRootcaUpdatePreCheckState(BaseState):
        """
        # check extra_args for the strategy
        # if there is a cert_file, we should manually setup the cert
        extra_args = \
            dcmanager_utils.get_sw_update_strategy_extra_args(self.context)
        extra_args = dcmanager_utils.get_sw_update_strategy_extra_args(self.context)
        if extra_args is None:
            extra_args = {}
        cert_file = extra_args.get('cert-file', None)
        cert_file = extra_args.get("cert-file", None)
        if cert_file:
            # this will be validated in the upload state
            self.override_next_state(STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START)

@ -1,15 +1,15 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
# Copyright (c) 2021, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dccommon.drivers.openstack.sysinv_v1 import KUBE_ROOTCA_UPDATE_ABORTED
from dccommon.drivers.openstack.sysinv_v1 import KUBE_ROOTCA_UPDATE_STARTED

from dcmanager.common.consts \
import STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
from dcmanager.common.consts \
import STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT
from dcmanager.common.consts import (
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
)
from dcmanager.common.consts import STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT

from dcmanager.orchestrator.states.base import BaseState

@ -20,7 +20,8 @@ class KubeRootcaUpdateStartState(BaseState):
def __init__(self, region_name):
super(KubeRootcaUpdateStartState, self).__init__(
next_state=STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT,
region_name=region_name)
region_name=region_name,
)

def _start_kube_rootca_update(self, strategy_step):
"""Start a kube rootca update
@ -28,9 +29,9 @@ class KubeRootcaUpdateStartState(BaseState):
This is a blocking API call.
returns the kube rootca update object.
"""
return self.get_sysinv_client(
self.region_name).kube_rootca_update_start(force=True,
alarm_ignore_list=[])
return self.get_sysinv_client(self.region_name).kube_rootca_update_start(
force=True, alarm_ignore_list=[]
)

def perform_state_action(self, strategy_step):
"""Start the update.
@ -41,8 +42,7 @@ class KubeRootcaUpdateStartState(BaseState):
Returns the next state for the state machine if successful.
"""
update = None
updates = \
self.get_sysinv_client(self.region_name).get_kube_rootca_updates()
updates = self.get_sysinv_client(self.region_name).get_kube_rootca_updates()
if len(updates) > 0:
# There is already an existing kube rootca update in the subcloud
update = updates[0]
@ -59,10 +59,10 @@ class KubeRootcaUpdateStartState(BaseState):
self.info_log(strategy_step, "Update started")
else:
# An unexpected update state. override the next state to use VIM
self.info_log(strategy_step,
"Update in [%s] state." % update.state)
self.info_log(strategy_step, "Update in [%s] state." % update.state)
self.override_next_state(
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY)
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
)

# Success. Move to the next stage
return self.next_state

@ -1,10 +1,11 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
# Copyright (c) 2021, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.common.consts \
import STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
from dcmanager.common.consts import (
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
)
from dcmanager.common.exceptions import CertificateUploadError
from dcmanager.common import utils as dcmanager_utils
from dcmanager.orchestrator.states.base import BaseState
@ -16,21 +17,22 @@ class KubeRootcaUpdateUploadCertState(BaseState):
def __init__(self, region_name):
super(KubeRootcaUpdateUploadCertState, self).__init__(
next_state=STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
region_name=region_name)
region_name=region_name,
)

def perform_state_action(self, strategy_step):
"""Upload the cert. Only a valid state if the update is started"""

# Get the cert-file from the extra_args of the strategy
extra_args = \
dcmanager_utils.get_sw_update_strategy_extra_args(self.context)
extra_args = dcmanager_utils.get_sw_update_strategy_extra_args(self.context)
if extra_args is None:
extra_args = {}
cert_file = extra_args.get('cert-file', None)
cert_file = extra_args.get("cert-file", None)
if cert_file:
with open(cert_file, 'rb') as pem_file:
with open(cert_file, "rb") as pem_file:
cert_upload = self.get_sysinv_client(
self.region_name).kube_rootca_update_upload_cert(pem_file)
self.region_name
).kube_rootca_update_upload_cert(pem_file)
# If the upload has an error, we fail the state
# this will log the error and subcloud info
if cert_upload.get("error"):

@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

@ -1,13 +1,12 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.orchestrator.states.creating_vim_strategy import \
CreatingVIMStrategyState
from dcmanager.orchestrator.states.creating_vim_strategy import CreatingVIMStrategyState


# Max time: 2 minutes = 12 queries x 10 seconds between
@ -22,7 +21,8 @@ class CreatingVIMPatchStrategyState(CreatingVIMStrategyState):
super(CreatingVIMPatchStrategyState, self).__init__(
next_state=consts.STRATEGY_STATE_APPLYING_VIM_PATCH_STRATEGY,
region_name=region_name,
strategy_name=vim.STRATEGY_NAME_SW_PATCH)
strategy_name=vim.STRATEGY_NAME_SW_PATCH,
)

self.SKIP_REASON = "no software patches need to be applied"
self.SKIP_STATE = consts.STRATEGY_STATE_FINISHING_PATCH_STRATEGY
@ -34,11 +34,15 @@ class CreatingVIMPatchStrategyState(CreatingVIMStrategyState):
def skip_check(self, strategy_step, subcloud_strategy):
"""Check if the VIM stategy needs to be skipped"""

if (subcloud_strategy and
(subcloud_strategy.state == vim.STATE_BUILD_FAILED) and
(subcloud_strategy.build_phase.reason == self.SKIP_REASON)):
self.info_log(strategy_step, "Skip forward in state machine due to:"
" ({})".format(self.SKIP_REASON))
if (
subcloud_strategy
and (subcloud_strategy.state == vim.STATE_BUILD_FAILED)
and (subcloud_strategy.build_phase.reason == self.SKIP_REASON)
):
self.info_log(
strategy_step,
"Skip forward in state machine due to: ({})".format(self.SKIP_REASON),
)
return self.SKIP_STATE

# If we get here, there is not a reason to skip

@ -15,14 +15,13 @@ class PreCheckState(BaseState):

def __init__(self, region_name):
super(PreCheckState, self).__init__(
next_state=consts.STRATEGY_STATE_UPDATING_PATCHES,
region_name=region_name)
next_state=consts.STRATEGY_STATE_UPDATING_PATCHES, region_name=region_name
)

def has_mgmt_affecting_alarms(self, ignored_alarms=()):
alarms = self.get_fm_client(self.region_name).get_alarms()
for alarm in alarms:
if alarm.mgmt_affecting == "True" and \
alarm.alarm_id not in ignored_alarms:
if alarm.mgmt_affecting == "True" and alarm.alarm_id not in ignored_alarms:
return True
# No management affecting alarms
return False
@ -35,15 +34,17 @@ class PreCheckState(BaseState):
message = None
try:
if self.has_mgmt_affecting_alarms(ignored_alarms=IGNORED_ALARMS_IDS):
message = ("Subcloud contains one or more management affecting"
" alarm(s). It will not be patched. Please resolve"
" the alarm condition(s) and try again.")
message = (
"Subcloud contains one or more management affecting alarm(s). "
"It will not be patched. Please resolve the alarm condition(s) "
"and try again."
)
except Exception as e:
self.exception_log(strategy_step,
"Failed to obtain subcloud alarm report")
message = ("Failed to obtain subcloud alarm report due to: (%s)."
" Please see /var/log/dcmanager/orchestrator.log for"
" details" % str(e))
self.exception_log(strategy_step, "Failed to obtain subcloud alarm report")
message = (
"Failed to obtain subcloud alarm report due to: (%s). "
"Please see /var/log/dcmanager/orchestrator.log for details" % str(e)
)

if message:
raise Exception(message)

@ -22,7 +22,8 @@ class PrestageState(BaseState):

def __init__(self, next_state, region_name):
super(PrestageState, self).__init__(
next_state=next_state, region_name=region_name)
next_state=next_state, region_name=region_name
)

@abc.abstractmethod
def _do_state_action(self, strategy_step):
@ -35,8 +36,8 @@ class PrestageState(BaseState):
except exceptions.StrategySkippedException:
# Move prestage_status back to None (nothing has changed)
db_api.subcloud_update(
self.context, strategy_step.subcloud.id,
prestage_status=None)
self.context, strategy_step.subcloud.id, prestage_status=None
)
raise
except Exception:
prestage.prestage_fail(self.context, strategy_step.subcloud.id)
@ -51,28 +52,34 @@ class PrestagePreCheckState(PrestageState):

def __init__(self, region_name):
super(PrestagePreCheckState, self).__init__(
next_state=consts.STRATEGY_STATE_PRESTAGE_PACKAGES,
region_name=region_name)
next_state=consts.STRATEGY_STATE_PRESTAGE_PACKAGES, region_name=region_name
)

@utils.synchronized('prestage-update-extra-args', external=True)
@utils.synchronized("prestage-update-extra-args", external=True)
def _update_oam_floating_ip(self, strategy_step, oam_floating_ip):
# refresh the extra_args
extra_args = utils.get_sw_update_strategy_extra_args(self.context)
if 'oam_floating_ip_dict' in extra_args:
LOG.debug("Updating oam_floating_ip_dict: %s: %s",
strategy_step.subcloud.name, oam_floating_ip)
oam_floating_ip_dict = extra_args['oam_floating_ip_dict']
oam_floating_ip_dict[strategy_step.subcloud.name] \
= oam_floating_ip
if "oam_floating_ip_dict" in extra_args:
LOG.debug(
"Updating oam_floating_ip_dict: %s: %s",
strategy_step.subcloud.name,
oam_floating_ip,
)
oam_floating_ip_dict = extra_args["oam_floating_ip_dict"]
oam_floating_ip_dict[strategy_step.subcloud.name] = oam_floating_ip
else:
LOG.debug("Creating oam_floating_ip_dict: %s: %s",
strategy_step.subcloud.name, oam_floating_ip)
oam_floating_ip_dict = {
strategy_step.subcloud.name: oam_floating_ip
}
LOG.debug(
"Creating oam_floating_ip_dict: %s: %s",
strategy_step.subcloud.name,
oam_floating_ip,
)
oam_floating_ip_dict = {strategy_step.subcloud.name: oam_floating_ip}
db_api.sw_update_strategy_update(
self.context, state=None, update_type=None,
additional_args={'oam_floating_ip_dict': oam_floating_ip_dict})
self.context,
state=None,
update_type=None,
additional_args={"oam_floating_ip_dict": oam_floating_ip_dict},
)

def _do_state_action(self, strategy_step):
extra_args = utils.get_sw_update_strategy_extra_args(self.context)
@ -82,15 +89,21 @@ class PrestagePreCheckState(PrestageState):
raise Exception(message)

payload = {
'sysadmin_password': extra_args['sysadmin_password'],
'force': extra_args['force']
"sysadmin_password": extra_args["sysadmin_password"],
"force": extra_args["force"],
}
if extra_args.get(consts.PRESTAGE_SOFTWARE_VERSION):
payload.update({consts.PRESTAGE_REQUEST_RELEASE:
extra_args.get(consts.PRESTAGE_SOFTWARE_VERSION)})
payload.update(
{
consts.PRESTAGE_REQUEST_RELEASE: extra_args.get(
consts.PRESTAGE_SOFTWARE_VERSION
)
}
)
try:
oam_floating_ip = prestage.validate_prestage(
strategy_step.subcloud, payload)
strategy_step.subcloud, payload
)
self._update_oam_floating_ip(strategy_step, oam_floating_ip)

prestage.prestage_start(self.context, strategy_step.subcloud.id)
@ -113,22 +126,27 @@ class PrestagePackagesState(PrestageState):

def __init__(self, region_name):
super(PrestagePackagesState, self).__init__(
next_state=consts.STRATEGY_STATE_PRESTAGE_IMAGES,
region_name=region_name)
next_state=consts.STRATEGY_STATE_PRESTAGE_IMAGES, region_name=region_name
)

def _do_state_action(self, strategy_step):
extra_args = utils.get_sw_update_strategy_extra_args(self.context)
payload = {
'sysadmin_password': extra_args['sysadmin_password'],
'oam_floating_ip':
extra_args['oam_floating_ip_dict'][strategy_step.subcloud.name],
'force': extra_args['force']
"sysadmin_password": extra_args["sysadmin_password"],
"oam_floating_ip": extra_args["oam_floating_ip_dict"][
strategy_step.subcloud.name
],
"force": extra_args["force"],
}
if extra_args.get(consts.PRESTAGE_SOFTWARE_VERSION):
payload.update({consts.PRESTAGE_REQUEST_RELEASE:
extra_args.get(consts.PRESTAGE_SOFTWARE_VERSION)})
prestage.prestage_packages(self.context,
strategy_step.subcloud, payload)
payload.update(
{
consts.PRESTAGE_REQUEST_RELEASE: extra_args.get(
consts.PRESTAGE_SOFTWARE_VERSION
)
}
)
prestage.prestage_packages(self.context, strategy_step.subcloud, payload)
self.info_log(strategy_step, "Packages finished")


@ -137,30 +155,37 @@ class PrestageImagesState(PrestageState):

def __init__(self, region_name):
super(PrestageImagesState, self).__init__(
next_state=consts.STRATEGY_STATE_COMPLETE,
region_name=region_name)
next_state=consts.STRATEGY_STATE_COMPLETE, region_name=region_name
)

def _do_state_action(self, strategy_step):
log_file = utils.get_subcloud_ansible_log_file(
strategy_step.subcloud.name)
log_file = utils.get_subcloud_ansible_log_file(strategy_step.subcloud.name)
# Get the prestage versions from the ansible playbook logs
# generated by the previous step - prestage packages.
prestage_versions = utils.get_msg_output_info(
log_file,
prestage.PRINT_PRESTAGE_VERSIONS_TASK,
prestage.PRESTAGE_VERSIONS_KEY_STR)
prestage.PRESTAGE_VERSIONS_KEY_STR,
)

extra_args = utils.get_sw_update_strategy_extra_args(self.context)
payload = {
'sysadmin_password': extra_args['sysadmin_password'],
'oam_floating_ip':
extra_args['oam_floating_ip_dict'][strategy_step.subcloud.name],
'force': extra_args['force']
"sysadmin_password": extra_args["sysadmin_password"],
"oam_floating_ip": extra_args["oam_floating_ip_dict"][
strategy_step.subcloud.name
],
"force": extra_args["force"],
}
if extra_args.get(consts.PRESTAGE_SOFTWARE_VERSION):
payload.update({consts.PRESTAGE_REQUEST_RELEASE:
extra_args.get(consts.PRESTAGE_SOFTWARE_VERSION)})
payload.update(
{
consts.PRESTAGE_REQUEST_RELEASE: extra_args.get(
consts.PRESTAGE_SOFTWARE_VERSION
)
}
)
prestage.prestage_images(self.context, strategy_step.subcloud, payload)
self.info_log(strategy_step, "Images finished")
prestage.prestage_complete(
self.context, strategy_step.subcloud.id, prestage_versions)
self.context, strategy_step.subcloud.id, prestage_versions
)

@ -6,8 +6,7 @@

from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.orchestrator.states.applying_vim_strategy import \
ApplyingVIMStrategyState
from dcmanager.orchestrator.states.applying_vim_strategy import ApplyingVIMStrategyState


class ApplyVIMSoftwareStrategyState(ApplyingVIMStrategyState):
@ -17,5 +16,5 @@ class ApplyVIMSoftwareStrategyState(ApplyingVIMStrategyState):
super().__init__(
next_state=consts.STRATEGY_STATE_SW_FINISH_STRATEGY,
region_name=region_name,
strategy_name=vim.STRATEGY_NAME_SW_USM
strategy_name=vim.STRATEGY_NAME_SW_USM,
)

@ -1,22 +1,26 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.common import consts
from dcmanager.orchestrator.states.software.cache import clients
from dcmanager.orchestrator.states.software.cache.clients import \
CLIENT_READ_EXCEPTIONS
from dcmanager.orchestrator.states.software.cache.clients import \
CLIENT_READ_MAX_ATTEMPTS
from dcmanager.orchestrator.states.software.cache.clients import CLIENT_READ_EXCEPTIONS
from dcmanager.orchestrator.states.software.cache.clients import (
CLIENT_READ_MAX_ATTEMPTS,
)


class CacheSpecification(object):
def __init__(self, fetch_implementation,
post_filter_implementation=None, valid_filters=frozenset(),
retry_on_exception=CLIENT_READ_EXCEPTIONS,
max_attempts=CLIENT_READ_MAX_ATTEMPTS,
retry_sleep_msecs=consts.PLATFORM_RETRY_SLEEP_MILLIS):
def __init__(
self,
fetch_implementation,
post_filter_implementation=None,
valid_filters=frozenset(),
retry_on_exception=CLIENT_READ_EXCEPTIONS,
max_attempts=CLIENT_READ_MAX_ATTEMPTS,
retry_sleep_msecs=consts.PLATFORM_RETRY_SLEEP_MILLIS,
):
"""Create cache specification.

:param fetch_implementation: implementation on how to retrieve data from
@ -45,17 +49,19 @@ class CacheSpecification(object):

"""Cache types"""

REGION_ONE_LICENSE_CACHE_TYPE = 'RegionOne system license'
REGION_ONE_SYSTEM_INFO_CACHE_TYPE = 'RegionOne system info'
REGION_ONE_RELEASE_USM_CACHE_TYPE = 'RegionOne release usm'
REGION_ONE_LICENSE_CACHE_TYPE = "RegionOne system license"
REGION_ONE_SYSTEM_INFO_CACHE_TYPE = "RegionOne system info"
REGION_ONE_RELEASE_USM_CACHE_TYPE = "RegionOne release usm"

"""Cache specifications"""

REGION_ONE_LICENSE_CACHE_SPECIFICATION = CacheSpecification(
lambda: clients.get_sysinv_client().get_license())
lambda: clients.get_sysinv_client().get_license()
)

REGION_ONE_SYSTEM_INFO_CACHE_SPECIFICATION = CacheSpecification(
lambda: clients.get_sysinv_client().get_system())
lambda: clients.get_sysinv_client().get_system()
)

REGION_ONE_RELEASE_USM_CACHE_SPECIFICATION = CacheSpecification(
lambda: clients.get_software_client().list(),
@ -77,21 +83,25 @@ REGION_ONE_RELEASE_USM_CACHE_SPECIFICATION = CacheSpecification(

# Map each expected operation type to its required cache types
CACHE_TYPES_BY_OPERATION_TYPE = {
consts.SW_UPDATE_TYPE_SOFTWARE: {REGION_ONE_LICENSE_CACHE_TYPE,
REGION_ONE_SYSTEM_INFO_CACHE_TYPE,
REGION_ONE_RELEASE_USM_CACHE_TYPE}
consts.SW_UPDATE_TYPE_SOFTWARE: {
REGION_ONE_LICENSE_CACHE_TYPE,
REGION_ONE_SYSTEM_INFO_CACHE_TYPE,
REGION_ONE_RELEASE_USM_CACHE_TYPE,
}
}

# Map each cache type to its corresponding cache specification
SPECIFICATION_BY_CACHE_TYPE = {
REGION_ONE_LICENSE_CACHE_TYPE: REGION_ONE_LICENSE_CACHE_SPECIFICATION,
REGION_ONE_SYSTEM_INFO_CACHE_TYPE: REGION_ONE_SYSTEM_INFO_CACHE_SPECIFICATION,
REGION_ONE_RELEASE_USM_CACHE_TYPE: REGION_ONE_RELEASE_USM_CACHE_SPECIFICATION
REGION_ONE_RELEASE_USM_CACHE_TYPE: REGION_ONE_RELEASE_USM_CACHE_SPECIFICATION,
}


def get_specifications_for_operation(operation_type):
# Retrieve all cache specifications required by a given operation type
# Return a mapping between each required type to its corresponding specification
return {cache_type: SPECIFICATION_BY_CACHE_TYPE.get(cache_type)
for cache_type in CACHE_TYPES_BY_OPERATION_TYPE.get(operation_type)}
return {
cache_type: SPECIFICATION_BY_CACHE_TYPE.get(cache_type)
for cache_type in CACHE_TYPES_BY_OPERATION_TYPE.get(operation_type)
}

@ -27,15 +27,21 @@ CLIENT_READ_MAX_ATTEMPTS = 2

def get_sysinv_client():
ks_client = get_keystone_client()
return SysinvClient(dccommon_consts.DEFAULT_REGION_NAME, ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('sysinv'),
timeout=CLIENT_READ_TIMEOUT_SECONDS)
return SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME,
ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint("sysinv"),
timeout=CLIENT_READ_TIMEOUT_SECONDS,
)


def get_software_client():
ks_client = get_keystone_client()
return SoftwareClient(dccommon_consts.DEFAULT_REGION_NAME, ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint('usm'))
return SoftwareClient(
dccommon_consts.DEFAULT_REGION_NAME,
ks_client.session,
endpoint=ks_client.endpoint_cache.get_endpoint("usm"),
)


def get_keystone_client(region_name=dccommon_consts.DEFAULT_REGION_NAME):
@ -49,6 +55,5 @@ def get_keystone_client(region_name=dccommon_consts.DEFAULT_REGION_NAME):
)
return os_client.keystone_client
except Exception:
LOG.warning('Failure initializing KeystoneClient for region: %s'
% region_name)
LOG.warning("Failure initializing KeystoneClient for region: %s" % region_name)
raise

@ -1,5 +1,5 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -8,8 +8,9 @@ from oslo_log import log

from dcmanager.common.exceptions import InvalidParameterValue
from dcmanager.orchestrator.states.software.cache import cache_specifications
from dcmanager.orchestrator.states.software.cache.shared_client_cache import \
SharedClientCache
from dcmanager.orchestrator.states.software.cache.shared_client_cache import (
SharedClientCache,
)

LOG = log.getLogger(__name__)

@ -23,11 +24,12 @@ class SharedCacheRepository(object):
def initialize_caches(self):
# Retrieve specifications for each cache type required by the operation
# Return mapping between each required type to a single cache instance of it
specifications_for_operation = (
cache_specifications.get_specifications_for_operation(self._operation_type)
)
self._shared_caches = {
cache_type: SharedClientCache(cache_type, cache_specification)
for cache_type, cache_specification in
cache_specifications.get_specifications_for_operation(
self._operation_type).items()
for cache_type, cache_specification in specifications_for_operation.items()
}

def read(self, cache_type, **filter_params):
@ -35,5 +37,6 @@ class SharedCacheRepository(object):
if cache:
return cache.read(**filter_params)
else:
raise InvalidParameterValue(err="Specified cache type '%s' not "
"present" % cache_type)
raise InvalidParameterValue(
err="Specified cache type '%s' not present" % cache_type
)

@ -48,17 +48,20 @@ class SharedClientCache(object):
self._load_data_from_client = cache_specification.fetch_implementation
retry_on_exception = cache_specification.retry_on_exception
if retry_on_exception:
retry = retrying.retry(retry_on_exception=lambda
ex: isinstance(ex, retry_on_exception),
stop_max_attempt_number=self._max_attempts,
wait_fixed=self._retry_sleep_msecs,
wait_func=self._retry_client_read)
self._load_data_from_client = \
retry(cache_specification.fetch_implementation)
retry = retrying.retry(
retry_on_exception=lambda ex: isinstance(ex, retry_on_exception),
stop_max_attempt_number=self._max_attempts,
wait_fixed=self._retry_sleep_msecs,
wait_func=self._retry_client_read,
)
self._load_data_from_client = retry(
cache_specification.fetch_implementation
)

# Use default implementation with no filtering if none is provided
self._post_filter_impl = cache_specification.post_filter_implementation\
or (lambda data, **filter_params: data)
self._post_filter_impl = cache_specification.post_filter_implementation or (
lambda data, **filter_params: data
)

def read(self, **filter_params):
"""Retrieve data from cache, if available.
@ -92,22 +95,24 @@ class SharedClientCache(object):
if self._client_lock.owner != lockutils.ReaderWriterLock.WRITER:
with self._client_lock.write_lock():
# Atomically fetch data from client and update the cache
LOG.info("Reading data from %s client for caching" %
self._cache_type)
LOG.info("Reading data from %s client for caching" % self._cache_type)
self._cache = self._load_data_from_client()
else:
# If a concurrent write is in progress, wait for it and recheck cache
with self._client_lock.read_lock():
if self._cache is None:
raise RuntimeError("Failed to retrieve data from %s cache. "
"Possible failure on concurrent client "
"read." % self._cache_type)
raise RuntimeError(
"Failed to retrieve data from %s cache. "
"Possible failure on concurrent client read." % self._cache_type
)

def _retry_client_read(self, attempt, _):
# To be called when a client read operation fails with a retryable error
# After this, read operation should be retried
LOG.warn("Retryable error occurred while reading from %s client "
"(Attempt %s/%s)" % (self._cache_type, attempt, self._max_attempts))
LOG.warn(
"Retryable error occurred while reading from %s client (Attempt %s/%s)"
% (self._cache_type, attempt, self._max_attempts)
)
return self._retry_sleep_msecs

def _post_filter(self, data, **filter_params):
@ -121,5 +126,6 @@ class SharedClientCache(object):
if filter_params:
invalid_params = set(filter_params.keys()) - self._valid_filters
if invalid_params:
raise InvalidParameterValue(err="Invalid filter parameters: %s" %
invalid_params)
raise InvalidParameterValue(
err="Invalid filter parameters: %s" % invalid_params
)

@ -6,8 +6,7 @@

from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.orchestrator.states.creating_vim_strategy import \
CreatingVIMStrategyState
from dcmanager.orchestrator.states.creating_vim_strategy import CreatingVIMStrategyState


class CreateVIMSoftwareStrategyState(CreatingVIMStrategyState):
@ -17,5 +16,5 @@ class CreateVIMSoftwareStrategyState(CreatingVIMStrategyState):
super(CreateVIMSoftwareStrategyState, self).__init__(
next_state=consts.STRATEGY_STATE_SW_APPLY_VIM_STRATEGY,
region_name=region_name,
strategy_name=vim.STRATEGY_NAME_SW_USM
strategy_name=vim.STRATEGY_NAME_SW_USM,
)

@ -9,8 +9,9 @@ from dcmanager.common import consts
from dcmanager.common import exceptions
from dcmanager.db import api as db_api
from dcmanager.orchestrator.states.base import BaseState
from dcmanager.orchestrator.states.software.cache.cache_specifications import \
REGION_ONE_LICENSE_CACHE_TYPE
from dcmanager.orchestrator.states.software.cache.cache_specifications import (
REGION_ONE_LICENSE_CACHE_TYPE,
)

# When a license is not installed, this will be part of the API error string
LICENSE_FILE_NOT_FOUND_SUBSTRING = "License file not found"
@ -22,7 +23,7 @@ class InstallLicenseState(BaseState):
def __init__(self, region_name):
super().__init__(
next_state=consts.STRATEGY_STATE_SW_CREATE_VIM_STRATEGY,
region_name=region_name
region_name=region_name,
)

@staticmethod
@ -37,39 +38,44 @@ class InstallLicenseState(BaseState):
"""

# check if the system controller has a license
system_controller_license = self._read_from_cache(
REGION_ONE_LICENSE_CACHE_TYPE)
system_controller_license = self._read_from_cache(REGION_ONE_LICENSE_CACHE_TYPE)
# get_license returns a dictionary with keys: content and error
# 'content' can be an empty string in success or failure case.
# 'error' is an empty string only in success case.
target_license = system_controller_license.get('content')
target_error = system_controller_license.get('error')
target_license = system_controller_license.get("content")
target_error = system_controller_license.get("error")

# If the system controller does not have a license, do not attempt
# to install licenses on subclouds, simply proceed to the next stage
if len(target_error) != 0:
if LICENSE_FILE_NOT_FOUND_SUBSTRING in target_error:
self.info_log(strategy_step,
f"System Controller License missing: {target_error}.")
self.info_log(
strategy_step, f"System Controller License missing: {target_error}."
)
return self.next_state
else:
# An unexpected error occurred querying the license
message = ('An unexpected error occurred querying the license '
f'{dccommon_consts.SYSTEM_CONTROLLER_NAME}. '
f'Detail: {target_error}')
message = (
"An unexpected error occurred querying the license "
f"{dccommon_consts.SYSTEM_CONTROLLER_NAME}. Detail: {target_error}"
)
db_api.subcloud_update(
self.context, strategy_step.subcloud_id,
error_description=message[0:consts.ERROR_DESCRIPTION_LENGTH])
self.context,
strategy_step.subcloud_id,
error_description=message[0 : consts.ERROR_DESCRIPTION_LENGTH],
)
raise exceptions.LicenseInstallError(
subcloud_id=dccommon_consts.SYSTEM_CONTROLLER_NAME,
error_message=target_error)
error_message=target_error,
)

# retrieve the keystone session for the subcloud and query its license
subcloud_sysinv_client = self.get_sysinv_client(
strategy_step.subcloud.region_name)
strategy_step.subcloud.region_name
)
subcloud_license_response = subcloud_sysinv_client.get_license()
subcloud_license = subcloud_license_response.get('content')
subcloud_error = subcloud_license_response.get('error')
subcloud_license = subcloud_license_response.get("content")
subcloud_error = subcloud_license_response.get("error")

# Skip license install if the license is already up to date
# If there was not an error, there might be a license
@ -84,18 +90,22 @@ class InstallLicenseState(BaseState):

# Install the license
install_rc = subcloud_sysinv_client.install_license(target_license)
install_error = install_rc.get('error')
install_error = install_rc.get("error")
if len(install_error) != 0:
# Save error response from sysinv into subcloud error description.
# Provide exception with sysinv error response to strategy_step details
message = ('Error installing license on subcloud '
f'{strategy_step.subcloud.name}. Detail: {install_error}')
message = (
f"Error installing license on subcloud {strategy_step.subcloud.name}. "
f"Detail: {install_error}"
)
db_api.subcloud_update(
self.context, strategy_step.subcloud_id,
error_description=message[0:consts.ERROR_DESCRIPTION_LENGTH])
self.context,
strategy_step.subcloud_id,
error_description=message[0 : consts.ERROR_DESCRIPTION_LENGTH],
)
raise exceptions.LicenseInstallError(
subcloud_id=strategy_step.subcloud_id,
error_message=install_error)
subcloud_id=strategy_step.subcloud_id, error_message=install_error
)

# The license has been successfully installed. Move to the next stage
self.info_log(strategy_step, "License installed.")

@ -31,30 +31,28 @@ from dcmanager.common import prestage
from dcmanager.common import utils
from dcmanager.db import api as db_api
from dcmanager.orchestrator.fw_update_orch_thread import FwUpdateOrchThread
from dcmanager.orchestrator.kube_rootca_update_orch_thread \
import KubeRootcaUpdateOrchThread
from dcmanager.orchestrator.kube_upgrade_orch_thread \
import KubeUpgradeOrchThread
from dcmanager.orchestrator.kube_rootca_update_orch_thread import (
KubeRootcaUpdateOrchThread,
)
from dcmanager.orchestrator.kube_upgrade_orch_thread import KubeUpgradeOrchThread
from dcmanager.orchestrator.patch_orch_thread import PatchOrchThread
from dcmanager.orchestrator.prestage_orch_thread import PrestageOrchThread
from dcmanager.orchestrator.software_orch_thread import SoftwareOrchThread
from dcmanager.orchestrator.validators.firmware_validator import (
FirmwareStrategyValidator
FirmwareStrategyValidator,
)
from dcmanager.orchestrator.validators.kube_root_ca_validator import (
KubeRootCaStrategyValidator
KubeRootCaStrategyValidator,
)
from dcmanager.orchestrator.validators.kubernetes_validator import (
KubernetesStrategyValidator
)
from dcmanager.orchestrator.validators.patch_validator import (
PatchStrategyValidator
KubernetesStrategyValidator,
)
from dcmanager.orchestrator.validators.patch_validator import PatchStrategyValidator
from dcmanager.orchestrator.validators.prestage_validator import (
PrestageStrategyValidator
PrestageStrategyValidator,
)
from dcmanager.orchestrator.validators.sw_deploy_validator import (
SoftwareDeployStrategyValidator
SoftwareDeployStrategyValidator,
)

LOG = logging.getLogger(__name__)
@ -64,10 +62,11 @@ class SwUpdateManager(manager.Manager):
"""Manages tasks related to software updates."""

def __init__(self, *args, **kwargs):
LOG.debug('SwUpdateManager initialization...')
LOG.debug("SwUpdateManager initialization...")

super(SwUpdateManager, self).__init__(service_name="sw_update_manager",
*args, **kwargs)
super(SwUpdateManager, self).__init__(
service_name="sw_update_manager", *args, **kwargs
)
# Used to protect strategies when an atomic read/update is required.
self.strategy_lock = threading.Lock()

@ -79,32 +78,38 @@ class SwUpdateManager(manager.Manager):

# - software orchestration thread
self.software_orch_thread = SoftwareOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.strategy_lock, self.audit_rpc_client
)
self.software_orch_thread.start()

# - patch orchestration thread
self.patch_orch_thread = PatchOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.strategy_lock, self.audit_rpc_client
)
self.patch_orch_thread.start()

# - fw update orchestration thread
self.fw_update_orch_thread = FwUpdateOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.strategy_lock, self.audit_rpc_client
)
self.fw_update_orch_thread.start()

# - kube upgrade orchestration thread
self.kube_upgrade_orch_thread = KubeUpgradeOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.strategy_lock, self.audit_rpc_client
)
self.kube_upgrade_orch_thread.start()

# - kube rootca update orchestration thread
self.kube_rootca_update_orch_thread = KubeRootcaUpdateOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.strategy_lock, self.audit_rpc_client
)
self.kube_rootca_update_orch_thread.start()

# - prestage orchestration thread
self.prestage_orch_thread = PrestageOrchThread(
self.strategy_lock, self.audit_rpc_client)
self.strategy_lock, self.audit_rpc_client
)
self.prestage_orch_thread.start()

self.strategy_validators = {
@ -113,7 +118,7 @@ class SwUpdateManager(manager.Manager):
consts.SW_UPDATE_TYPE_KUBERNETES: KubernetesStrategyValidator(),
consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE: KubeRootCaStrategyValidator(),
consts.SW_UPDATE_TYPE_PATCH: PatchStrategyValidator(),
consts.SW_UPDATE_TYPE_PRESTAGE: PrestageStrategyValidator()
consts.SW_UPDATE_TYPE_PRESTAGE: PrestageStrategyValidator(),
}

def stop(self):
@ -174,24 +179,22 @@ class SwUpdateManager(manager.Manager):
if expiry_date:
is_valid, reason = utils.validate_expiry_date(expiry_date)
if not is_valid:
raise exceptions.BadRequest(resource='strategy',
msg=reason)
raise exceptions.BadRequest(resource="strategy", msg=reason)
if subject:
is_valid, reason = \
utils.validate_certificate_subject(subject)
is_valid, reason = utils.validate_certificate_subject(subject)
if not is_valid:
raise exceptions.BadRequest(resource='strategy',
msg=reason)
raise exceptions.BadRequest(resource="strategy", msg=reason)
if cert_file:
if expiry_date or subject:
raise exceptions.BadRequest(
resource='strategy',
msg='Invalid extra args.'
' <cert-file> cannot be specified'
' along with <subject> or <expiry-date>.')
resource="strategy",
msg=(
"Invalid extra args. <cert-file> cannot be specified "
"along with <subject> or <expiry-date>."
),
)
# copy the cert-file to the vault
vault_file = self._vault_upload(consts.CERTS_VAULT_DIR,
cert_file)
vault_file = self._vault_upload(consts.CERTS_VAULT_DIR, cert_file)
# update extra_args with the new path (in the vault)
extra_args[consts.EXTRA_ARGS_CERT_FILE] = vault_file

@ -199,8 +202,7 @@ class SwUpdateManager(manager.Manager):
if extra_args:
# cert-file extra_arg needs vault handling for kube rootca update
if strategy_type == consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
cert_file = extra_args.get(
consts.EXTRA_ARGS_CERT_FILE)
cert_file = extra_args.get(consts.EXTRA_ARGS_CERT_FILE)
if cert_file:
# remove this cert file from the vault
self._vault_remove(consts.CERTS_VAULT_DIR, cert_file)
@ -226,26 +228,26 @@ class SwUpdateManager(manager.Manager):
"Failed creating software update strategy of type "
f"{payload['type']}. {msg}"
)
raise exceptions.BadRequest(resource='strategy', msg=msg)
raise exceptions.BadRequest(resource="strategy", msg=msg)

single_group = None
subcloud_group = payload.get('subcloud_group')
subcloud_group = payload.get("subcloud_group")

if subcloud_group:
single_group = utils.subcloud_group_get_by_ref(context, subcloud_group)
subcloud_apply_type = single_group.update_apply_type
max_parallel_subclouds = single_group.max_parallel_subclouds
else:
subcloud_apply_type = payload.get('subcloud-apply-type')
max_parallel_subclouds_str = payload.get('max-parallel-subclouds')
subcloud_apply_type = payload.get("subcloud-apply-type")
max_parallel_subclouds_str = payload.get("max-parallel-subclouds")

if not max_parallel_subclouds_str:
max_parallel_subclouds = None
else:
max_parallel_subclouds = int(max_parallel_subclouds_str)

stop_on_failure = payload.get('stop-on-failure') in ['true']
force = payload.get('force') in ['true']
stop_on_failure = payload.get("stop-on-failure") in ["true"]
force = payload.get("force") in ["true"]

installed_releases = []
software_version = None
@ -261,20 +263,20 @@ class SwUpdateManager(manager.Manager):
for_sw_deploy = False

# Has the user specified a specific subcloud?
cloud_name = payload.get('cloud_name')
strategy_type = payload.get('type')
cloud_name = payload.get("cloud_name")
strategy_type = payload.get("type")
prestage_global_validated = False
if cloud_name:
# Make sure subcloud exists
try:
subcloud = db_api.subcloud_get_by_name(context, cloud_name)
except exceptions.SubcloudNameNotFound:
msg = f'Subcloud {cloud_name} does not exist'
msg = f"Subcloud {cloud_name} does not exist"
LOG.error(
"Failed creating software update strategy of type "
f"{payload['type']}. {msg}"
)
raise exceptions.BadRequest(resource='strategy', msg=msg)
raise exceptions.BadRequest(resource="strategy", msg=msg)

# TODO(rlima): move prestage to its validator
if strategy_type == consts.SW_UPDATE_TYPE_PRESTAGE:
@ -282,23 +284,19 @@ class SwUpdateManager(manager.Manager):
try:
prestage.global_prestage_validate(payload)
prestage_global_validated = True
installed_releases = (
utils.get_systemcontroller_installed_releases()
)
installed_releases = utils.get_systemcontroller_installed_releases()
prestage.initial_subcloud_validate(
subcloud,
installed_releases,
software_major_release,
for_sw_deploy
for_sw_deploy,
)
except exceptions.PrestagePreCheckFailedException as ex:
raise exceptions.BadRequest(resource='strategy',
msg=str(ex))
raise exceptions.BadRequest(resource="strategy", msg=str(ex))

else:
self.strategy_validators[strategy_type].\
validate_strategy_requirements(
context, subcloud.id, subcloud.name, force
self.strategy_validators[strategy_type].validate_strategy_requirements(
context, subcloud.id, subcloud.name, force
)

extra_args = None
@ -308,20 +306,21 @@ class SwUpdateManager(manager.Manager):
try:
prestage.global_prestage_validate(payload)
except exceptions.PrestagePreCheckFailedException as ex:
raise exceptions.BadRequest(
resource='strategy',
msg=str(ex))
raise exceptions.BadRequest(resource="strategy", msg=str(ex))

extra_args = {
consts.EXTRA_ARGS_SYSADMIN_PASSWORD:
payload.get(consts.EXTRA_ARGS_SYSADMIN_PASSWORD),
consts.EXTRA_ARGS_SYSADMIN_PASSWORD: payload.get(
consts.EXTRA_ARGS_SYSADMIN_PASSWORD
),
consts.EXTRA_ARGS_FORCE: force,
consts.PRESTAGE_SOFTWARE_VERSION:
consts.PRESTAGE_SOFTWARE_VERSION: (
software_version if software_version else SW_VERSION
),
}
else:
extra_args = self.strategy_validators[strategy_type].\
build_extra_args(payload)
extra_args = self.strategy_validators[strategy_type].build_extra_args(
payload
)

# Don't create a strategy if any of the subclouds is online and the
# relevant sync status is unknown. Offline subcloud is skipped unless
@ -359,14 +358,12 @@ class SwUpdateManager(manager.Manager):
f"Excluding subcloud from prestage strategy: {subcloud.name}"
)
else:
count_invalid_subclouds = (
db_api.subcloud_count_invalid_for_strategy_type(
context,
self.strategy_validators[strategy_type].endpoint_type,
single_group.id if subcloud_group else None,
cloud_name,
force and strategy_type == consts.SW_UPDATE_TYPE_SOFTWARE
)
count_invalid_subclouds = db_api.subcloud_count_invalid_for_strategy_type(
context,
self.strategy_validators[strategy_type].endpoint_type,
single_group.id if subcloud_group else None,
cloud_name,
force and strategy_type == consts.SW_UPDATE_TYPE_SOFTWARE,
)
if count_invalid_subclouds > 0:
msg = (
@ -424,13 +421,13 @@ class SwUpdateManager(manager.Manager):

for subcloud, sync_status in valid_subclouds:
if (
force and
subcloud.availability_status ==
dccommon_consts.AVAILABILITY_OFFLINE
force
and subcloud.availability_status
== dccommon_consts.AVAILABILITY_OFFLINE
):
if (
sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC or
sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN
sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC
or sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN
):
filtered_valid_subclouds.append((subcloud, sync_status))
elif sync_status == dccommon_consts.SYNC_STATUS_OUT_OF_SYNC:
@ -441,12 +438,12 @@ class SwUpdateManager(manager.Manager):
if not valid_subclouds:
# handle extra_args processing such as removing from the vault
self._process_extra_args_deletion(strategy_type, extra_args)
msg = 'Strategy has no steps to apply'
msg = "Strategy has no steps to apply"
LOG.error(
"Failed creating software update strategy of type "
f"{payload['type']}. {msg}"
)
raise exceptions.BadRequest(resource='strategy', msg=msg)
raise exceptions.BadRequest(resource="strategy", msg=msg)

# Create the strategy
strategy = db_api.sw_update_strategy_create(
@ -463,7 +460,7 @@ class SwUpdateManager(manager.Manager):
[subcloud.id for subcloud, sync_status in valid_subclouds],
stage=consts.STAGE_SUBCLOUD_ORCHESTRATION_CREATED,
state=consts.STRATEGY_STATE_INITIAL,
details=''
details="",
)

LOG.info(
@ -484,33 +481,34 @@ class SwUpdateManager(manager.Manager):
# The strategy object is common to all workers (patch, upgrades, etc)
with self.strategy_lock:
# Retrieve the existing strategy from the database
sw_update_strategy = \
db_api.sw_update_strategy_get(context, update_type=update_type)
sw_update_strategy = db_api.sw_update_strategy_get(
context, update_type=update_type
)

# Semantic checking
if sw_update_strategy.state not in [
consts.SW_UPDATE_STATE_INITIAL,
consts.SW_UPDATE_STATE_COMPLETE,
consts.SW_UPDATE_STATE_FAILED,
consts.SW_UPDATE_STATE_ABORTED]:
consts.SW_UPDATE_STATE_INITIAL,
consts.SW_UPDATE_STATE_COMPLETE,
consts.SW_UPDATE_STATE_FAILED,
consts.SW_UPDATE_STATE_ABORTED,
]:
raise exceptions.BadRequest(
resource='strategy',
msg='Strategy in state %s cannot be deleted' %
sw_update_strategy.state)
resource="strategy",
msg="Strategy in state %s cannot be deleted"
% sw_update_strategy.state,
)

# Set the state to deleting, which will trigger the orchestration
# to delete it...
sw_update_strategy = db_api.sw_update_strategy_update(
context,
state=consts.SW_UPDATE_STATE_DELETING,
update_type=update_type)
context, state=consts.SW_UPDATE_STATE_DELETING, update_type=update_type
)
# handle extra_args processing such as removing from the vault
self._process_extra_args_deletion(
sw_update_strategy.type, sw_update_strategy.extra_args
)

strategy_dict = db_api.sw_update_strategy_db_model_to_dict(
sw_update_strategy)
strategy_dict = db_api.sw_update_strategy_db_model_to_dict(sw_update_strategy)
return strategy_dict

def apply_sw_update_strategy(self, context, update_type=None):
@ -524,24 +522,24 @@ class SwUpdateManager(manager.Manager):
# Ensure our read/update of the strategy is done without interference
with self.strategy_lock:
# Retrieve the existing strategy from the database
sw_update_strategy = \
db_api.sw_update_strategy_get(context, update_type=update_type)
sw_update_strategy = db_api.sw_update_strategy_get(
context, update_type=update_type
)

# Semantic checking
if sw_update_strategy.state != consts.SW_UPDATE_STATE_INITIAL:
raise exceptions.BadRequest(
resource='strategy',
msg='Strategy in state %s cannot be applied' %
sw_update_strategy.state)
resource="strategy",
msg="Strategy in state %s cannot be applied"
% sw_update_strategy.state,
)

# Set the state to applying, which will trigger the orchestration
# to begin...
sw_update_strategy = db_api.sw_update_strategy_update(
context,
state=consts.SW_UPDATE_STATE_APPLYING,
update_type=update_type)
strategy_dict = db_api.sw_update_strategy_db_model_to_dict(
sw_update_strategy)
context, state=consts.SW_UPDATE_STATE_APPLYING, update_type=update_type
)
strategy_dict = db_api.sw_update_strategy_db_model_to_dict(sw_update_strategy)
return strategy_dict

def abort_sw_update_strategy(self, context, update_type=None):
@ -555,20 +553,22 @@ class SwUpdateManager(manager.Manager):
# Ensure our read/update of the strategy is done without interference
with self.strategy_lock:
# Retrieve the existing strategy from the database
sw_update_strategy = \
db_api.sw_update_strategy_get(context, update_type=update_type)
sw_update_strategy = db_api.sw_update_strategy_get(
context, update_type=update_type
)

# Semantic checking
if sw_update_strategy.state != consts.SW_UPDATE_STATE_APPLYING:
raise exceptions.BadRequest(
resource='strategy',
msg='Strategy in state %s cannot be aborted' %
sw_update_strategy.state)
resource="strategy",
msg="Strategy in state %s cannot be aborted"
% sw_update_strategy.state,
)

# Set the state to abort requested, which will trigger
# the orchestration to abort...
sw_update_strategy = db_api.sw_update_strategy_update(
context, state=consts.SW_UPDATE_STATE_ABORT_REQUESTED)
strategy_dict = db_api.sw_update_strategy_db_model_to_dict(
sw_update_strategy)
context, state=consts.SW_UPDATE_STATE_ABORT_REQUESTED
)
strategy_dict = db_api.sw_update_strategy_db_model_to_dict(sw_update_strategy)
return strategy_dict

@ -44,14 +44,13 @@ class StrategyValidationBase(object):

if subcloud_status.sync_status == dccommon_consts.SYNC_STATUS_IN_SYNC:
msg = (
f'Subcloud {subcloud_name} does not require '
f'{self.endpoint_type} update'
f"Subcloud {subcloud_name} does not require {self.endpoint_type} update"
)
LOG.error(
"Failed creating software update strategy of type "
f"{self.endpoint_type}. {msg}"
)
raise exceptions.BadRequest(resource='strategy', msg=msg)
raise exceptions.BadRequest(resource="strategy", msg=msg)

def build_extra_args(self, payload):
"""Builds the extra args for a strategy

@ -37,8 +37,7 @@ class KubeRootCaStrategyValidator(StrategyValidationBase):
"""

return {
consts.EXTRA_ARGS_EXPIRY_DATE:
payload.get(consts.EXTRA_ARGS_EXPIRY_DATE),
consts.EXTRA_ARGS_EXPIRY_DATE: payload.get(consts.EXTRA_ARGS_EXPIRY_DATE),
consts.EXTRA_ARGS_SUBJECT: payload.get(consts.EXTRA_ARGS_SUBJECT),
consts.EXTRA_ARGS_CERT_FILE: payload.get(consts.EXTRA_ARGS_CERT_FILE),
}
@ -54,6 +53,6 @@ class KubeRootCaStrategyValidator(StrategyValidationBase):
if self.accepts_force and force:
return [
dccommon_consts.SYNC_STATUS_IN_SYNC,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
]
return [dccommon_consts.SYNC_STATUS_OUT_OF_SYNC]

@ -51,6 +51,6 @@ class KubernetesStrategyValidator(StrategyValidationBase):
if self.accepts_force and force:
return [
dccommon_consts.SYNC_STATUS_IN_SYNC,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
]
return [dccommon_consts.SYNC_STATUS_OUT_OF_SYNC]

@ -35,8 +35,8 @@ class PatchStrategyValidator(StrategyValidationBase):
:param payload: strategy request payload
"""

upload_only_bool = payload.get(consts.EXTRA_ARGS_UPLOAD_ONLY) == 'true'
upload_only_bool = payload.get(consts.EXTRA_ARGS_UPLOAD_ONLY) == "true"
return {
consts.EXTRA_ARGS_UPLOAD_ONLY: upload_only_bool,
consts.EXTRA_ARGS_PATCH: payload.get(consts.EXTRA_ARGS_PATCH)
consts.EXTRA_ARGS_PATCH: payload.get(consts.EXTRA_ARGS_PATCH),
}

@ -35,9 +35,7 @@ class SoftwareDeployStrategyValidator(StrategyValidationBase):
:param payload: strategy request payload
"""

return {
consts.EXTRA_ARGS_RELEASE_ID: payload.get(consts.EXTRA_ARGS_RELEASE_ID)
}
return {consts.EXTRA_ARGS_RELEASE_ID: payload.get(consts.EXTRA_ARGS_RELEASE_ID)}

def build_availability_status_filter(self, force):
"""Builds the availability status filter for valid subclouds
@ -68,6 +66,6 @@ class SoftwareDeployStrategyValidator(StrategyValidationBase):
if force:
return [
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
dccommon_consts.SYNC_STATUS_UNKNOWN
dccommon_consts.SYNC_STATUS_UNKNOWN,
]
return [dccommon_consts.SYNC_STATUS_OUT_OF_SYNC]

@ -28,6 +28,7 @@ formatted_modules = [
"dcmanager/audit",
"dcmanager/common",
"dcmanager/db",
"dcmanager/orchestrator",
]

@ -68,12 +68,13 @@ commands = oslo_debug_helper {posargs}
show-source = True
max-line-length = 88
# Suppressed flake8 codes
# E203 whitespace before ':'; conflict with Black
# E731 do not assign a lambda expression, use a def
# H301 one import per line; conflict with Black
# W503 line break before binary operator; conflict with Black
# W504 line break after binary operator
# W605 invalid escape sequence
# E731 do not assign a lambda expression, use a def
ignore = H301,W503,W504,W605,E731
ignore = E203,E731,H301,W503,W504,W605
builtins = _

[testenv:genconfig]