Clean up endpoint_cache
This commit moves the subcloud endpoint-building methods into utils.py,
improving code organization and reusability. Additionally, the endpoint
URLs have been relocated to consts.py.

Note: TODO tasks have been added to track the removal of the patching
endpoints in future releases.

Test Plan:
- PASS: All unit tests successful.

Story: 2011149
Task: 50825

Change-Id: Ie560a20d17c4a96b6a6c7252641bd3701f6ca57b
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
parent 1dafaf03a2
commit c9f6ef1bf8
@@ -91,20 +91,32 @@ NEUTRON_QUOTA_FIELDS = (
    "security_group_rule",
)

ENDPOINT_TYPE_PLATFORM = "platform"
ENDPOINT_TYPE_PATCHING = "patching"
ENDPOINT_TYPE_IDENTITY = "identity"
ENDPOINT_TYPE_FM = "faultmanagement"
ENDPOINT_TYPE_NFV = "nfv"
ENDPOINT_TYPE_SOFTWARE = "usm"
ENDPOINT_TYPE_LOAD = "load"
# Endpoint services names
ENDPOINT_NAME_DCAGENT = "dcagent"
ENDPOINT_NAME_FM = "fm"
ENDPOINT_NAME_KEYSTONE = "keystone"
ENDPOINT_NAME_SYSINV = "sysinv"
ENDPOINT_NAME_USM = "usm"
ENDPOINT_NAME_VIM = "vim"

# Endpoint services types
ENDPOINT_TYPE_DCAGENT = "dcagent"
ENDPOINT_TYPE_DC_CERT = "dc-cert"
ENDPOINT_TYPE_FIRMWARE = "firmware"
ENDPOINT_TYPE_IDENTITY = "identity"
ENDPOINT_TYPE_KUBERNETES = "kubernetes"
ENDPOINT_TYPE_KUBE_ROOTCA = "kube-rootca"
ENDPOINT_TYPE_USM = "usm"
ENDPOINT_TYPE_PLATFORM = "platform"
ENDPOINT_TYPE_SOFTWARE = "usm"
ENDPOINT_TYPE_FM = "faultmanagement"
ENDPOINT_TYPE_NFV = "nfv"
# TODO(nicodemos): Remove patching/load after patching is no longer supported
ENDPOINT_TYPE_LOAD = "load"
ENDPOINT_TYPE_PATCHING = "patching"

# All endpoint types
# TODO(nicodemos): Remove patching/load after is no longer supported
ENDPOINT_TYPES_LIST = [
    ENDPOINT_TYPE_PLATFORM,
    ENDPOINT_TYPE_PATCHING,

@@ -118,6 +130,7 @@ ENDPOINT_TYPES_LIST = [
]

# All endpoint audit requests
# TODO(nicodemos): Remove patching/load after is no longer supported
# TODO(nicodemos): The ENDPOINT_TYPE_SOFTWARE will use the 'spare_audit_requested'
# temporarily until the USM feature is fully complete. Afterward, the software audit
# will replace the patch audit.

@@ -130,6 +143,17 @@ ENDPOINT_AUDIT_REQUESTS = {
    ENDPOINT_TYPE_SOFTWARE: "spare_audit_requested",
}

# TODO(nicodemos): Remove patching/load after is no longer supported
ENDPOINT_URLS = {
    ENDPOINT_NAME_DCAGENT: "https://{}:8326",
    ENDPOINT_NAME_FM: "https://{}:18003",
    ENDPOINT_NAME_KEYSTONE: "https://{}:5001/v3",
    ENDPOINT_TYPE_PATCHING: "https://{}:5492",
    ENDPOINT_NAME_SYSINV: "https://{}:6386/v1",
    ENDPOINT_NAME_USM: "https://{}:5498",
    ENDPOINT_NAME_VIM: "https://{}:4546",
}

BASE_AUDIT = "base_audit"
FIRMWARE_AUDIT = "firmware_audit"
KUBERNETES_AUDIT = "kubernetes_audit"
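
The ENDPOINT_URLS table above holds URL templates keyed by service name; the helper functions relocated to utils.py later in this change fill in the host portion with a subcloud management IP. A minimal sketch of how one template resolves (the IP address is hypothetical):

    from dccommon import consts

    # Fill the sysinv template with a hypothetical subcloud management IP
    url = consts.ENDPOINT_URLS[consts.ENDPOINT_NAME_SYSINV].format("192.168.101.2")
    # url == "https://192.168.101.2:6386/v1"
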
@@ -16,7 +16,6 @@
#

import collections

from typing import Callable
from typing import List
from typing import Tuple

@@ -25,83 +24,18 @@ from typing import Union
from keystoneauth1.identity import v3
from keystoneauth1 import loading
from keystoneauth1 import session
import netaddr

from keystoneclient.v3 import client as ks_client

from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging

from dccommon import consts
from dccommon.utils import is_token_expiring_soon

from dccommon import utils

CONF = cfg.CONF

LOG = logging.getLogger(__name__)

LOCK_NAME = "dc-keystone-endpoint-cache"

ENDPOINT_URLS = {
    "dcagent": "https://{}:8326",
    "fm": "https://{}:18003",
    "keystone": "https://{}:5001/v3",
    "patching": "https://{}:5492",
    "sysinv": "https://{}:6386/v1",
    "usm": "https://{}:5498",
    "vim": "https://{}:4546",
}


def build_subcloud_endpoint_map(ip: str) -> dict:
    """Builds a mapping of service endpoints for a given IP address.

    :param ip: The IP address for which service endpoints need to be mapped.
    :type ip: str
    :return: A dictionary containing service names as keys and formatted
        endpoint URLs as values.
    :rtype: dict
    """
    endpoint_map = {}
    for service, endpoint in ENDPOINT_URLS.items():
        formatted_ip = f"[{ip}]" if netaddr.IPAddress(ip).version == 6 else ip
        endpoint_map[service] = endpoint.format(formatted_ip)
    return endpoint_map


def build_subcloud_endpoints(subcloud_mgmt_ips: dict) -> dict:
    """Builds a dictionary of service endpoints for multiple subcloud management IPs.

    :param subcloud_mgmt_ips: A dictionary containing subcloud regions as keys
        and the corresponding management IP as value.
    :type subcloud_mgmt_ips: dict
    :return: A dictionary with subcloud regions as keys and their respective
        service endpoints as values.
    :rtype: dict
    """
    subcloud_endpoints = {}
    for region, ip in subcloud_mgmt_ips.items():
        subcloud_endpoints[region] = build_subcloud_endpoint_map(ip)
    return subcloud_endpoints


def build_subcloud_endpoint(ip: str, service: str) -> str:
    """Builds a service endpoint for a given IP address.

    :param ip: The IP address for constructing the service endpoint.
    :type ip: str
    :param service: The service of the endpoint
    :type service: str
    :return: The service endpoint URL.
    :type: str
    """
    endpoint = ENDPOINT_URLS.get(service, None)
    if endpoint:
        formatted_ip = f"[{ip}]" if netaddr.IPAddress(ip).version == 6 else ip
        endpoint = endpoint.format(formatted_ip)
    return endpoint


class EndpointCache(object):
    """Cache for storing endpoint information.

@@ -161,7 +95,7 @@ class EndpointCache(object):
        ):
            LOG.info("Initializing and caching subcloud endpoints")
            # pylint: disable=not-callable
            EndpointCache.subcloud_endpoints = build_subcloud_endpoints(
            EndpointCache.subcloud_endpoints = utils.build_subcloud_endpoints(
                EndpointCache.fetch_subcloud_ips()
            )
            LOG.info("Finished initializing and caching subcloud endpoints")

@@ -423,7 +357,7 @@ class EndpointCache(object):
            endpoint URLs as values.
        :rtype: dict
        """
        endpoint_map = build_subcloud_endpoint_map(management_ip)
        endpoint_map = utils.build_subcloud_endpoint_map(management_ip)
        cls.update_master_service_endpoint_region(region_name, endpoint_map)
        return endpoint_map

@@ -441,7 +375,7 @@ class EndpointCache(object):
        )
        # pylint: disable-next=not-callable
        subcloud_ip = EndpointCache.fetch_subcloud_ips(region_name)
        endpoint_map = build_subcloud_endpoint_map(subcloud_ip)
        endpoint_map = utils.build_subcloud_endpoint_map(subcloud_ip)
        # pylint: disable-next=unsupported-assignment-operation
        EndpointCache.subcloud_endpoints[region_name] = endpoint_map

@@ -460,7 +394,7 @@ class EndpointCache(object):
        # token is expiring soon
        token_expiring_soon = False
        if EndpointCache.master_keystone_client is None or (
            token_expiring_soon := is_token_expiring_soon(
            token_expiring_soon := utils.is_token_expiring_soon(
                token=EndpointCache.master_token
            )
        ):
@@ -26,6 +26,7 @@ from oslo_config import cfg

from dccommon import endpoint_cache
from dccommon.tests import base
from dccommon import utils

FAKE_REGIONONE_SYSINV_ENDPOINT = "http://[2620:10a:a001:a114::d00]:6385/v1"
FAKE_REGIONONE_KEYSTONE_ENDPOINT = "http://[2620:10a:a001:a114::d00]:5000/v3"

@@ -90,14 +91,7 @@ class EndpointCacheTest(base.DCCommonTestCase):
        ]
        cfg.CONF.register_opts(auth_uri_opts, "endpoint_cache")

        # Mock the token validator (which is confusing so here is the info)
        # endpoint_cache.py has an import:
        # from dccommon.utils import is_token_expiring_soon
        # so to patch where that function is called we use this syntax:
        # patch.object(endpoint_cache, 'is_token_expiring_soon')
        # instead of:
        # patch.object(dccommon.utils, 'is_token_expiring_soon')
        p = mock.patch.object(endpoint_cache, "is_token_expiring_soon")
        p = mock.patch.object(utils, "is_token_expiring_soon")
        self.mock_is_token_expiring_soon = p.start()
        self.mock_is_token_expiring_soon.return_value = True
        self.addCleanup(p.stop)

@@ -198,7 +192,7 @@ class EndpointCacheTest(base.DCCommonTestCase):
        ips = ("192.168.1.1", "2620:10a:a001:ac09::7ce0")
        for ip in ips:
            expected = self._get_expected_endpoints(ip)
            result = endpoint_cache.build_subcloud_endpoint_map(ip)
            result = utils.build_subcloud_endpoint_map(ip)
            self.assertEqual(expected, result)

    def test_build_subcloud_endpoint_map_fails(self):

@@ -213,7 +207,7 @@ class EndpointCacheTest(base.DCCommonTestCase):
        for ip in ips:
            self.assertRaises(
                netaddr.AddrFormatError,
                endpoint_cache.build_subcloud_endpoint_map,
                utils.build_subcloud_endpoint_map,
                ip,
            )

@@ -229,11 +223,11 @@ class EndpointCacheTest(base.DCCommonTestCase):
        }
        self.assertEqual(
            expected_result,
            endpoint_cache.build_subcloud_endpoints(subcloud_mgmt_ips),
            utils.build_subcloud_endpoints(subcloud_mgmt_ips),
        )

    def test_empty_ip_dict_succeeds(self):
        empty_ips = {}
        expected_endpoints = {}
        actual_endpoints = endpoint_cache.build_subcloud_endpoints(empty_ips)
        actual_endpoints = utils.build_subcloud_endpoints(empty_ips)
        self.assertEqual(expected_endpoints, actual_endpoints)
@@ -24,6 +24,7 @@ import time
from typing import Callable

from eventlet.green import subprocess
import netaddr
from oslo_log import log as logging
from oslo_utils import timeutils

@@ -412,3 +413,52 @@ def log_subcloud_msg(
    if avail_status:
        prefix += f"Availability: {avail_status}. "
    log_func(f"{prefix}{msg}")


def build_subcloud_endpoint_map(ip: str) -> dict:
    """Builds a mapping of service endpoints for a given IP address.

    :param ip: The IP address for which service endpoints need to be mapped.
    :type ip: str
    :return: A dictionary containing service names as keys and formatted
        endpoint URLs as values.
    :rtype: dict
    """
    endpoint_map = {}
    for service, endpoint in consts.ENDPOINT_URLS.items():
        formatted_ip = f"[{ip}]" if netaddr.IPAddress(ip).version == 6 else ip
        endpoint_map[service] = endpoint.format(formatted_ip)
    return endpoint_map


def build_subcloud_endpoints(subcloud_mgmt_ips: dict) -> dict:
    """Builds a dictionary of service endpoints for multiple subcloud management IPs.

    :param subcloud_mgmt_ips: A dictionary containing subcloud regions as keys
        and the corresponding management IP as value.
    :type subcloud_mgmt_ips: dict
    :return: A dictionary with subcloud regions as keys and their respective
        service endpoints as values.
    :rtype: dict
    """
    subcloud_endpoints = {}
    for region, ip in subcloud_mgmt_ips.items():
        subcloud_endpoints[region] = build_subcloud_endpoint_map(ip)
    return subcloud_endpoints


def build_subcloud_endpoint(ip: str, service: str) -> str:
    """Builds a service endpoint for a given IP address.

    :param ip: The IP address for constructing the service endpoint.
    :type ip: str
    :param service: The service of the endpoint
    :type service: str
    :return: The service endpoint URL.
    :type: str
    """
    endpoint = consts.ENDPOINT_URLS.get(service, None)
    if endpoint:
        formatted_ip = f"[{ip}]" if netaddr.IPAddress(ip).version == 6 else ip
        endpoint = endpoint.format(formatted_ip)
    return endpoint
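
A minimal usage sketch of the helpers added to utils.py above; the region names and addresses are hypothetical, and IPv6 addresses are wrapped in brackets before being formatted into the URL templates:

    from dccommon import utils

    # Endpoint for a single service on one subcloud (IPv6 is bracketed)
    utils.build_subcloud_endpoint("fd01::2", "sysinv")
    # -> "https://[fd01::2]:6386/v1"

    # Endpoint maps for several subclouds, keyed by region
    endpoints = utils.build_subcloud_endpoints(
        {"subcloud1": "192.168.101.2", "subcloud2": "fd01::2"}
    )
    # endpoints["subcloud1"]["fm"] -> "https://192.168.101.2:18003"
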
@@ -14,6 +14,7 @@
# under the License.
#

# TODO(nicodemos): Remove this file after all support to patching is removed
from keystoneauth1 import exceptions as keystone_exceptions
from oslo_log import log as logging

@@ -22,7 +23,7 @@ from dccommon.drivers.openstack import patching_v1
from dccommon.drivers.openstack.patching_v1 import PatchingClient
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon.endpoint_cache import build_subcloud_endpoint
from dccommon.utils import build_subcloud_endpoint
from dcmanager.common import utils

LOG = logging.getLogger(__name__)
@@ -11,7 +11,7 @@ from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.keystone_v3 import KeystoneClient as ks_client
from dccommon.drivers.openstack import sdk_platform
from dccommon.drivers.openstack import software_v1
from dccommon.endpoint_cache import build_subcloud_endpoint
from dccommon.utils import build_subcloud_endpoint
from dccommon.utils import log_subcloud_msg
from dcmanager.common import utils
@@ -26,8 +26,7 @@ from dccommon.drivers.openstack.fm import FmClient
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon import endpoint_cache
from dccommon.utils import log_subcloud_msg
from dccommon.utils import subcloud_has_dcagent
from dccommon import utils as dccommon_utils
from dcmanager.audit import alarm_aggregation
from dcmanager.audit import base_audit
from dcmanager.audit import firmware_audit

@@ -347,7 +346,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
        failures = list()
        availability_data = dict()
        endpoint_data = dict()
        has_dcagent = subcloud_has_dcagent(subcloud.software_version)
        has_dcagent = dccommon_utils.subcloud_has_dcagent(subcloud.software_version)

        # Set defaults to None and disabled so we will still set disabled
        # status if we encounter an error.

@@ -369,7 +368,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
            dcagent_client = DcagentClient(
                subcloud_region,
                admin_session,
                endpoint=endpoint_cache.build_subcloud_endpoint(
                endpoint=dccommon_utils.build_subcloud_endpoint(
                    subcloud_management_ip, "dcagent"
                ),
            )

@@ -495,7 +494,9 @@ class SubcloudAuditWorkerManager(manager.Manager):
            if avail_to_set == dccommon_consts.AVAILABILITY_OFFLINE:
                inactive_sg = audit_value.get("inactive_sg")
                msg = f"Inactive service groups: {inactive_sg}"
                log_subcloud_msg(LOG.debug, msg, subcloud_name, avail_to_set)
                dccommon_utils.log_subcloud_msg(
                    LOG.debug, msg, subcloud_name, avail_to_set
                )
            alarms = audit_value.get("alarms")
            if alarms:
                self.alarm_aggr.update_alarm_summary(subcloud_name, alarms)
@@ -341,11 +341,13 @@ EXTRA_ARGS_SUBJECT = "subject"
EXTRA_ARGS_SYSADMIN_PASSWORD = "sysadmin_password"
EXTRA_ARGS_FORCE = "force"

# TODO(nicodemos): Remove after patching is no longer supported
# extra_args for patching
EXTRA_ARGS_UPLOAD_ONLY = "upload-only"
EXTRA_ARGS_PATCH_ID = "patch_id"

# sw_version supported for patching
# TODO(nicodemos): Remove after patching is no longer supported
# sw_version supported for patching legacy
PATCHING_SW_VERSION = "22.12"

# extra_args for software
@@ -145,6 +145,7 @@ def validate_bootstrap_values(payload: dict):
        pecan.abort(400, _("external_oam_floating_address required"))


# TODO(nicodemos): Change to verify the releases instead of patching
def validate_system_controller_patch_status(operation: str):
    ks_client = get_ks_client()
    patching_client = PatchingClient(
@@ -45,15 +45,13 @@ import yaml
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon.endpoint_cache import EndpointCache
from dccommon import endpoint_cache
from dccommon.exceptions import PlaybookExecutionFailed
from dccommon.exceptions import SubcloudNotFound
from dccommon import kubeoperator
from dccommon.subcloud_enrollment import SubcloudEnrollmentInit
from dccommon.subcloud_install import SubcloudInstall
from dccommon.utils import AnsiblePlaybook
from dccommon.utils import LAST_SW_VERSION_IN_CENTOS
from dccommon.utils import send_subcloud_shutdown_signal
from dccommon import utils as dccommon_utils
from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
from dcmanager.common import consts
from dcmanager.common.consts import INVENTORY_FILE_POSTFIX

@@ -107,7 +105,7 @@ ANSIBLE_VALIDATE_KEYSTONE_PASSWORD_SCRIPT = (

USERS_TO_REPLICATE = [
    "sysinv",
    "patching",
    "patching",  # TODO(nicodemos): Remove after patching is removed
    "usm",
    "vim",
    "mtce",

@@ -168,15 +166,6 @@ MAX_PARALLEL_SUBCLOUD_BACKUP_DELETE = 250
MAX_PARALLEL_SUBCLOUD_BACKUP_RESTORE = 100
CENTRAL_BACKUP_DIR = "/opt/dc-vault/backups"

ENDPOINT_URLS = {
    dccommon_consts.ENDPOINT_TYPE_PLATFORM: "https://{}:6386/v1",
    dccommon_consts.ENDPOINT_TYPE_IDENTITY: "https://{}:5001/v3",
    dccommon_consts.ENDPOINT_TYPE_PATCHING: "https://{}:5492",
    dccommon_consts.ENDPOINT_TYPE_FM: "https://{}:18003",
    dccommon_consts.ENDPOINT_TYPE_NFV: "https://{}:4546",
    dccommon_consts.ENDPOINT_TYPE_SOFTWARE: "https://{}:5498",
}

# Values for the exponential backoff retry to get subcloud's
# certificate secret.
MAX_ATTEMPTS_TO_GET_INTERMEDIATE_CA_CERT = 15

@@ -564,7 +553,7 @@ class SubcloudManager(manager.Manager):

        # TODO(yuxing) Remove the validate_keystone_passwords_script when end
        # the support of rehoming a subcloud with a software version below 22.12
        if software_version <= LAST_SW_VERSION_IN_CENTOS:
        if software_version <= dccommon_utils.LAST_SW_VERSION_IN_CENTOS:
            extra_vars += (
                " validate_keystone_passwords_script='%s'"
                % ANSIBLE_VALIDATE_KEYSTONE_PASSWORD_SCRIPT

@@ -999,7 +988,7 @@ class SubcloudManager(manager.Manager):

        # Run the rehome-subcloud playbook
        try:
            ansible = AnsiblePlaybook(subcloud.name)
            ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
            ansible.run_playbook(log_file, rehome_command)
        except PlaybookExecutionFailed:
            msg = (

@@ -1443,7 +1432,7 @@ class SubcloudManager(manager.Manager):
        subcloud = utils.update_abort_status(context, subcloud_id, deploy_status)

        try:
            ansible = AnsiblePlaybook(subcloud.name)
            ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
            aborted = ansible.run_abort()
            if not aborted:
                LOG.warning(

@@ -1455,7 +1444,7 @@ class SubcloudManager(manager.Manager):

            if subcloud.deploy_status == consts.DEPLOY_STATE_ABORTING_INSTALL:
                # Send shutdown signal to subcloud
                send_subcloud_shutdown_signal(subcloud.name)
                dccommon_utils.send_subcloud_shutdown_signal(subcloud.name)
        except Exception as ex:
            LOG.error(
                "Subcloud deploy abort failed for subcloud %s: %s"

@@ -1537,7 +1526,7 @@ class SubcloudManager(manager.Manager):

        # TODO(Yuxing) remove replicating the smapi user when end the support
        # of rehoming a subcloud with a software version below 22.12
        if subcloud.software_version <= LAST_SW_VERSION_IN_CENTOS:
        if subcloud.software_version <= dccommon_utils.LAST_SW_VERSION_IN_CENTOS:
            payload["users"]["smapi"] = str(
                keyring.get_password("smapi", dccommon_consts.SERVICES_USER_NAME)
            )

@@ -1693,7 +1682,9 @@ class SubcloudManager(manager.Manager):

        # TODO(Yuxing) remove replicating the smapi user when end the support
        # of rehoming a subcloud with a software version below 22.12
        if rehoming and subcloud.software_version <= LAST_SW_VERSION_IN_CENTOS:
        if rehoming and (
            subcloud.software_version <= dccommon_utils.LAST_SW_VERSION_IN_CENTOS
        ):
            payload["users"]["smapi"] = str(
                keyring.get_password("smapi", dccommon_consts.SERVICES_USER_NAME)
            )

@@ -2579,7 +2570,7 @@ class SubcloudManager(manager.Manager):

        # Run the subcloud backup playbook
        try:
            ansible = AnsiblePlaybook(subcloud.name)
            ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
            ansible.run_playbook(log_file, backup_command)

            # Decide between complete-local or complete-central

@@ -2610,7 +2601,7 @@ class SubcloudManager(manager.Manager):

        try:
            # Run the subcloud backup delete playbook
            ansible = AnsiblePlaybook(subcloud.name)
            ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
            ansible.run_playbook(log_file, delete_command)

            # Set backup status to unknown after delete, since most recent backup may

@@ -2657,7 +2648,7 @@ class SubcloudManager(manager.Manager):
        )
        # Run the subcloud backup restore playbook
        try:
            ansible = AnsiblePlaybook(subcloud.name)
            ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
            ansible.run_playbook(
                log_file, restore_command, timeout=CONF.playbook_timeout
            )

@@ -2792,7 +2783,7 @@ class SubcloudManager(manager.Manager):
        )

        try:
            ansible = AnsiblePlaybook(subcloud.name)
            ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
            aborted = ansible.run_playbook(log_file, config_command)
        except PlaybookExecutionFailed:
            msg = utils.find_ansible_error_msg(

@@ -2900,7 +2891,7 @@ class SubcloudManager(manager.Manager):

        LOG.info(f"Starting enroll of subcloud {subcloud.name}")
        try:
            ansible = AnsiblePlaybook(subcloud.name)
            ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
            ansible.run_playbook(log_file, enroll_command)
        except PlaybookExecutionFailed:
            msg = utils.find_ansible_error_msg(

@@ -2941,7 +2932,7 @@ class SubcloudManager(manager.Manager):
        # Run the ansible subcloud bootstrap playbook
        LOG.info("Starting bootstrap of %s" % subcloud.name)
        try:
            ansible = AnsiblePlaybook(subcloud.name)
            ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
            aborted = ansible.run_playbook(log_file, bootstrap_command)
        except PlaybookExecutionFailed:
            msg = utils.find_ansible_error_msg(

@@ -3776,7 +3767,7 @@ class SubcloudManager(manager.Manager):
        )
        subcloud_id = subcloud.id
        try:
            ansible = AnsiblePlaybook(subcloud_name)
            ansible = dccommon_utils.AnsiblePlaybook(subcloud_name)
            ansible.run_playbook(log_file, update_command)
            utils.delete_subcloud_inventory(overrides_file)
        except PlaybookExecutionFailed:

@@ -3895,20 +3886,11 @@ class SubcloudManager(manager.Manager):

    def _update_services_endpoint(self, context, payload, subcloud_region, m_ks_client):
        ip = utils.get_primary_management_start_address(payload)
        formatted_ip = f"[{ip}]" if netaddr.IPAddress(ip).version == 6 else ip

        services_endpoints = {
            "keystone": "https://{}:5001/v3".format(formatted_ip),
            "sysinv": "https://{}:6386/v1".format(formatted_ip),
            "fm": "https://{}:18003".format(formatted_ip),
            "patching": "https://{}:5492".format(formatted_ip),
            "vim": "https://{}:4546".format(formatted_ip),
            "usm": "https://{}:5498".format(formatted_ip),
        }
        services_endpoints = dccommon_utils.build_subcloud_endpoint_map(ip)

        LOG.info(
            "Update services endpoint to %s in subcloud region %s"
            % (formatted_ip, subcloud_region)
            % (ip, subcloud_region)
        )
        # Update service URLs in subcloud endpoint cache
        self.audit_rpc_client.trigger_subcloud_endpoints_update(

@@ -3925,7 +3907,7 @@ class SubcloudManager(manager.Manager):
        )

        # Update dcmanager endpoint cache
        EndpointCache.update_master_service_endpoint_region(
        endpoint_cache.EndpointCache.update_master_service_endpoint_region(
            subcloud_region, services_endpoints
        )
@@ -15,6 +15,8 @@
# under the License.
#

# TODO(nicodemos): Remove this file and all patch states after all support
# to patching is removed
from oslo_log import log as logging

from dccommon.drivers.openstack import vim
@@ -10,6 +10,7 @@ It defines methods used in dcmanager orchestrator's to handle the strategy
by its type.
"""

# TODO(nicodemos): Remove this file after all support to patching is removed
from oslo_log import log as logging

from dccommon import consts as dccommon_consts
@@ -103,19 +103,36 @@ FAKE_PROJECTS = [


class FakeService(object):
    def __init__(self, type, id):
    def __init__(self, name, type, id):
        self.name = name
        self.type = type
        self.id = id


FAKE_SERVICES = [
    FakeService(dccommon_consts.ENDPOINT_TYPE_PLATFORM, 1),
    FakeService(dccommon_consts.ENDPOINT_TYPE_IDENTITY, 2),
    FakeService(dccommon_consts.ENDPOINT_TYPE_PATCHING, 3),
    FakeService(dccommon_consts.ENDPOINT_TYPE_FM, 4),
    FakeService(dccommon_consts.ENDPOINT_TYPE_NFV, 5),
    FakeService(dccommon_consts.ENDPOINT_TYPE_DC_CERT, 6),
    FakeService(dccommon_consts.ENDPOINT_TYPE_SOFTWARE, 7),
    FakeService(
        dccommon_consts.ENDPOINT_NAME_SYSINV, dccommon_consts.ENDPOINT_TYPE_PLATFORM, 1
    ),
    FakeService(
        dccommon_consts.ENDPOINT_NAME_KEYSTONE,
        dccommon_consts.ENDPOINT_TYPE_IDENTITY,
        2,
    ),
    FakeService(
        dccommon_consts.ENDPOINT_TYPE_PATCHING,
        dccommon_consts.ENDPOINT_TYPE_PATCHING,
        3,
    ),
    FakeService(dccommon_consts.ENDPOINT_NAME_FM, dccommon_consts.ENDPOINT_TYPE_FM, 4),
    FakeService(
        dccommon_consts.ENDPOINT_NAME_VIM, dccommon_consts.ENDPOINT_TYPE_NFV, 5
    ),
    FakeService(
        dccommon_consts.ENDPOINT_TYPE_DC_CERT, dccommon_consts.ENDPOINT_TYPE_DC_CERT, 6
    ),
    FakeService(
        dccommon_consts.ENDPOINT_NAME_USM, dccommon_consts.ENDPOINT_TYPE_SOFTWARE, 7
    ),
]
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO(nicodemos): Remove this file after all support to patching is removed

import glob
import json

@@ -36,7 +37,6 @@ from dcorch.api.proxy.common.service import Middleware
from dcorch.api.proxy.common import utils as proxy_utils
from dcorch.common import context


LOG = logging.getLogger(__name__)

CONF = cfg.CONF
@@ -26,11 +26,10 @@ from oslo_utils import timeutils
from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon.endpoint_cache import build_subcloud_endpoint
from dccommon import exceptions as dccommon_exceptions
from dccommon.utils import build_subcloud_endpoint
from dcorch.common import consts
from dcorch.common import exceptions

from dcorch.engine.fernet_key_manager import FERNET_REPO_MASTER_ID
from dcorch.engine.fernet_key_manager import FernetKeyManager
from dcorch.engine.sync_thread import AUDIT_RESOURCE_EXTRA
@@ -24,8 +24,8 @@ from oslo_utils import timeutils

from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack import sdk_platform as sdk
from dccommon.endpoint_cache import build_subcloud_endpoint
from dccommon.endpoint_cache import EndpointCache
from dccommon.utils import build_subcloud_endpoint
from dcdbsync.dbsyncclient import client as dbsyncclient
from dcmanager.rpc import client as dcmanager_rpc_client
from dcorch.common import consts