Clean up endpoint_cache

This commit moves the subcloud build endpoint methods into utils.py,
improving code organization and reusability. Additionally, endpoint
URLs have been relocated to the consts.py file.

Note: TODO tasks have been added to address the removal of patching
endpoints in future releases.

Test Plan:
- PASS: All unit tests successful.

Story: 2011149
Task: 50825

Change-Id: Ie560a20d17c4a96b6a6c7252641bd3701f6ca57b
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
This commit is contained in:
Hugo Brito 2024-08-05 17:28:21 -03:00 committed by Hugo Nicodemos
parent 1dafaf03a2
commit c9f6ef1bf8
16 changed files with 157 additions and 149 deletions

View File

@ -91,20 +91,32 @@ NEUTRON_QUOTA_FIELDS = (
"security_group_rule", "security_group_rule",
) )
ENDPOINT_TYPE_PLATFORM = "platform" # Endpoint services names
ENDPOINT_TYPE_PATCHING = "patching" ENDPOINT_NAME_DCAGENT = "dcagent"
ENDPOINT_TYPE_IDENTITY = "identity" ENDPOINT_NAME_FM = "fm"
ENDPOINT_TYPE_FM = "faultmanagement" ENDPOINT_NAME_KEYSTONE = "keystone"
ENDPOINT_TYPE_NFV = "nfv" ENDPOINT_NAME_SYSINV = "sysinv"
ENDPOINT_TYPE_SOFTWARE = "usm" ENDPOINT_NAME_USM = "usm"
ENDPOINT_TYPE_LOAD = "load" ENDPOINT_NAME_VIM = "vim"
# Endpoint services types
ENDPOINT_TYPE_DCAGENT = "dcagent"
ENDPOINT_TYPE_DC_CERT = "dc-cert" ENDPOINT_TYPE_DC_CERT = "dc-cert"
ENDPOINT_TYPE_FIRMWARE = "firmware" ENDPOINT_TYPE_FIRMWARE = "firmware"
ENDPOINT_TYPE_IDENTITY = "identity"
ENDPOINT_TYPE_KUBERNETES = "kubernetes" ENDPOINT_TYPE_KUBERNETES = "kubernetes"
ENDPOINT_TYPE_KUBE_ROOTCA = "kube-rootca" ENDPOINT_TYPE_KUBE_ROOTCA = "kube-rootca"
ENDPOINT_TYPE_USM = "usm" ENDPOINT_TYPE_USM = "usm"
ENDPOINT_TYPE_PLATFORM = "platform"
ENDPOINT_TYPE_SOFTWARE = "usm"
ENDPOINT_TYPE_FM = "faultmanagement"
ENDPOINT_TYPE_NFV = "nfv"
# TODO(nicodemos): Remove patching/load after patching is no longer supported
ENDPOINT_TYPE_LOAD = "load"
ENDPOINT_TYPE_PATCHING = "patching"
# All endpoint types # All endpoint types
# TODO(nicodemos): Remove patching/load after patching is no longer supported
ENDPOINT_TYPES_LIST = [ ENDPOINT_TYPES_LIST = [
ENDPOINT_TYPE_PLATFORM, ENDPOINT_TYPE_PLATFORM,
ENDPOINT_TYPE_PATCHING, ENDPOINT_TYPE_PATCHING,
@ -118,6 +130,7 @@ ENDPOINT_TYPES_LIST = [
] ]
# All endpoint audit requests # All endpoint audit requests
# TODO(nicodemos): Remove patching/load after patching is no longer supported
# TODO(nicodemos): The ENDPOINT_TYPE_SOFTWARE will use the 'spare_audit_requested' # TODO(nicodemos): The ENDPOINT_TYPE_SOFTWARE will use the 'spare_audit_requested'
# temporarily until the USM feature is fully complete. Afterward, the software audit # temporarily until the USM feature is fully complete. Afterward, the software audit
# will replace the patch audit. # will replace the patch audit.
@ -130,6 +143,17 @@ ENDPOINT_AUDIT_REQUESTS = {
ENDPOINT_TYPE_SOFTWARE: "spare_audit_requested", ENDPOINT_TYPE_SOFTWARE: "spare_audit_requested",
} }
# TODO(nicodemos): Remove patching/load after patching is no longer supported
ENDPOINT_URLS = {
ENDPOINT_NAME_DCAGENT: "https://{}:8326",
ENDPOINT_NAME_FM: "https://{}:18003",
ENDPOINT_NAME_KEYSTONE: "https://{}:5001/v3",
ENDPOINT_TYPE_PATCHING: "https://{}:5492",
ENDPOINT_NAME_SYSINV: "https://{}:6386/v1",
ENDPOINT_NAME_USM: "https://{}:5498",
ENDPOINT_NAME_VIM: "https://{}:4546",
}
BASE_AUDIT = "base_audit" BASE_AUDIT = "base_audit"
FIRMWARE_AUDIT = "firmware_audit" FIRMWARE_AUDIT = "firmware_audit"
KUBERNETES_AUDIT = "kubernetes_audit" KUBERNETES_AUDIT = "kubernetes_audit"

View File

@ -16,7 +16,6 @@
# #
import collections import collections
from typing import Callable from typing import Callable
from typing import List from typing import List
from typing import Tuple from typing import Tuple
@ -25,83 +24,18 @@ from typing import Union
from keystoneauth1.identity import v3 from keystoneauth1.identity import v3
from keystoneauth1 import loading from keystoneauth1 import loading
from keystoneauth1 import session from keystoneauth1 import session
import netaddr
from keystoneclient.v3 import client as ks_client from keystoneclient.v3 import client as ks_client
from oslo_concurrency import lockutils from oslo_concurrency import lockutils
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from dccommon import consts from dccommon import consts
from dccommon.utils import is_token_expiring_soon from dccommon import utils
CONF = cfg.CONF CONF = cfg.CONF
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
LOCK_NAME = "dc-keystone-endpoint-cache" LOCK_NAME = "dc-keystone-endpoint-cache"
ENDPOINT_URLS = {
"dcagent": "https://{}:8326",
"fm": "https://{}:18003",
"keystone": "https://{}:5001/v3",
"patching": "https://{}:5492",
"sysinv": "https://{}:6386/v1",
"usm": "https://{}:5498",
"vim": "https://{}:4546",
}
def build_subcloud_endpoint_map(ip: str) -> dict:
"""Builds a mapping of service endpoints for a given IP address.
:param ip: The IP address for which service endpoints need to be mapped.
:type ip: str
:return: A dictionary containing service names as keys and formatted
endpoint URLs as values.
:rtype: dict
"""
endpoint_map = {}
for service, endpoint in ENDPOINT_URLS.items():
formatted_ip = f"[{ip}]" if netaddr.IPAddress(ip).version == 6 else ip
endpoint_map[service] = endpoint.format(formatted_ip)
return endpoint_map
def build_subcloud_endpoints(subcloud_mgmt_ips: dict) -> dict:
"""Builds a dictionary of service endpoints for multiple subcloud management IPs.
:param subcloud_mgmt_ips: A dictionary containing subcloud regions as keys
and the corresponding management IP as value.
:type subcloud_mgmt_ips: dict
:return: A dictionary with subcloud regions as keys and their respective
service endpoints as values.
:rtype: dict
"""
subcloud_endpoints = {}
for region, ip in subcloud_mgmt_ips.items():
subcloud_endpoints[region] = build_subcloud_endpoint_map(ip)
return subcloud_endpoints
def build_subcloud_endpoint(ip: str, service: str) -> str:
"""Builds a service endpoint for a given IP address.
:param ip: The IP address for constructing the service endpoint.
:type ip: str
:param service: The service of the endpoint
:type service: str
:return: The service endpoint URL.
:type: str
"""
endpoint = ENDPOINT_URLS.get(service, None)
if endpoint:
formatted_ip = f"[{ip}]" if netaddr.IPAddress(ip).version == 6 else ip
endpoint = endpoint.format(formatted_ip)
return endpoint
class EndpointCache(object): class EndpointCache(object):
"""Cache for storing endpoint information. """Cache for storing endpoint information.
@ -161,7 +95,7 @@ class EndpointCache(object):
): ):
LOG.info("Initializing and caching subcloud endpoints") LOG.info("Initializing and caching subcloud endpoints")
# pylint: disable=not-callable # pylint: disable=not-callable
EndpointCache.subcloud_endpoints = build_subcloud_endpoints( EndpointCache.subcloud_endpoints = utils.build_subcloud_endpoints(
EndpointCache.fetch_subcloud_ips() EndpointCache.fetch_subcloud_ips()
) )
LOG.info("Finished initializing and caching subcloud endpoints") LOG.info("Finished initializing and caching subcloud endpoints")
@ -423,7 +357,7 @@ class EndpointCache(object):
endpoint URLs as values. endpoint URLs as values.
:rtype: dict :rtype: dict
""" """
endpoint_map = build_subcloud_endpoint_map(management_ip) endpoint_map = utils.build_subcloud_endpoint_map(management_ip)
cls.update_master_service_endpoint_region(region_name, endpoint_map) cls.update_master_service_endpoint_region(region_name, endpoint_map)
return endpoint_map return endpoint_map
@ -441,7 +375,7 @@ class EndpointCache(object):
) )
# pylint: disable-next=not-callable # pylint: disable-next=not-callable
subcloud_ip = EndpointCache.fetch_subcloud_ips(region_name) subcloud_ip = EndpointCache.fetch_subcloud_ips(region_name)
endpoint_map = build_subcloud_endpoint_map(subcloud_ip) endpoint_map = utils.build_subcloud_endpoint_map(subcloud_ip)
# pylint: disable-next=unsupported-assignment-operation # pylint: disable-next=unsupported-assignment-operation
EndpointCache.subcloud_endpoints[region_name] = endpoint_map EndpointCache.subcloud_endpoints[region_name] = endpoint_map
@ -460,7 +394,7 @@ class EndpointCache(object):
# token is expiring soon # token is expiring soon
token_expiring_soon = False token_expiring_soon = False
if EndpointCache.master_keystone_client is None or ( if EndpointCache.master_keystone_client is None or (
token_expiring_soon := is_token_expiring_soon( token_expiring_soon := utils.is_token_expiring_soon(
token=EndpointCache.master_token token=EndpointCache.master_token
) )
): ):

View File

@ -26,6 +26,7 @@ from oslo_config import cfg
from dccommon import endpoint_cache from dccommon import endpoint_cache
from dccommon.tests import base from dccommon.tests import base
from dccommon import utils
FAKE_REGIONONE_SYSINV_ENDPOINT = "http://[2620:10a:a001:a114::d00]:6385/v1" FAKE_REGIONONE_SYSINV_ENDPOINT = "http://[2620:10a:a001:a114::d00]:6385/v1"
FAKE_REGIONONE_KEYSTONE_ENDPOINT = "http://[2620:10a:a001:a114::d00]:5000/v3" FAKE_REGIONONE_KEYSTONE_ENDPOINT = "http://[2620:10a:a001:a114::d00]:5000/v3"
@ -90,14 +91,7 @@ class EndpointCacheTest(base.DCCommonTestCase):
] ]
cfg.CONF.register_opts(auth_uri_opts, "endpoint_cache") cfg.CONF.register_opts(auth_uri_opts, "endpoint_cache")
# Mock the token validator (which is confusing so here is the info) p = mock.patch.object(utils, "is_token_expiring_soon")
# endpoint_cache.py has an import:
# from dccommon.utils import is_token_expiring_soon
# so to patch where that function is called we use this syntax:
# patch.object(endpoint_cache, 'is_token_expiring_soon')
# instead of:
# patch.object(dccommon.utils, 'is_token_expiring_soon')
p = mock.patch.object(endpoint_cache, "is_token_expiring_soon")
self.mock_is_token_expiring_soon = p.start() self.mock_is_token_expiring_soon = p.start()
self.mock_is_token_expiring_soon.return_value = True self.mock_is_token_expiring_soon.return_value = True
self.addCleanup(p.stop) self.addCleanup(p.stop)
@ -198,7 +192,7 @@ class EndpointCacheTest(base.DCCommonTestCase):
ips = ("192.168.1.1", "2620:10a:a001:ac09::7ce0") ips = ("192.168.1.1", "2620:10a:a001:ac09::7ce0")
for ip in ips: for ip in ips:
expected = self._get_expected_endpoints(ip) expected = self._get_expected_endpoints(ip)
result = endpoint_cache.build_subcloud_endpoint_map(ip) result = utils.build_subcloud_endpoint_map(ip)
self.assertEqual(expected, result) self.assertEqual(expected, result)
def test_build_subcloud_endpoint_map_fails(self): def test_build_subcloud_endpoint_map_fails(self):
@ -213,7 +207,7 @@ class EndpointCacheTest(base.DCCommonTestCase):
for ip in ips: for ip in ips:
self.assertRaises( self.assertRaises(
netaddr.AddrFormatError, netaddr.AddrFormatError,
endpoint_cache.build_subcloud_endpoint_map, utils.build_subcloud_endpoint_map,
ip, ip,
) )
@ -229,11 +223,11 @@ class EndpointCacheTest(base.DCCommonTestCase):
} }
self.assertEqual( self.assertEqual(
expected_result, expected_result,
endpoint_cache.build_subcloud_endpoints(subcloud_mgmt_ips), utils.build_subcloud_endpoints(subcloud_mgmt_ips),
) )
def test_empty_ip_dict_succeeds(self): def test_empty_ip_dict_succeeds(self):
empty_ips = {} empty_ips = {}
expected_endpoints = {} expected_endpoints = {}
actual_endpoints = endpoint_cache.build_subcloud_endpoints(empty_ips) actual_endpoints = utils.build_subcloud_endpoints(empty_ips)
self.assertEqual(expected_endpoints, actual_endpoints) self.assertEqual(expected_endpoints, actual_endpoints)

View File

@ -24,6 +24,7 @@ import time
from typing import Callable from typing import Callable
from eventlet.green import subprocess from eventlet.green import subprocess
import netaddr
from oslo_log import log as logging from oslo_log import log as logging
from oslo_utils import timeutils from oslo_utils import timeutils
@ -412,3 +413,52 @@ def log_subcloud_msg(
if avail_status: if avail_status:
prefix += f"Availability: {avail_status}. " prefix += f"Availability: {avail_status}. "
log_func(f"{prefix}{msg}") log_func(f"{prefix}{msg}")
def build_subcloud_endpoint_map(ip: str) -> dict:
    """Build a mapping of service endpoints for a given IP address.

    :param ip: The IP address for which service endpoints need to be mapped.
    :type ip: str
    :return: A dictionary containing service names as keys and formatted
        endpoint URLs as values.
    :rtype: dict
    :raises netaddr.AddrFormatError: If ``ip`` is not a valid IP address.
    """
    # The bracket formatting depends only on the IP (IPv6 addresses must be
    # bracketed inside a URL), so compute it once instead of re-parsing the
    # address for every service in the loop.
    formatted_ip = f"[{ip}]" if netaddr.IPAddress(ip).version == 6 else ip
    return {
        service: endpoint.format(formatted_ip)
        for service, endpoint in consts.ENDPOINT_URLS.items()
    }
def build_subcloud_endpoints(subcloud_mgmt_ips: dict) -> dict:
    """Build service endpoint maps for multiple subcloud management IPs.

    :param subcloud_mgmt_ips: A dictionary containing subcloud regions as keys
        and the corresponding management IP as value.
    :type subcloud_mgmt_ips: dict
    :return: A dictionary with subcloud regions as keys and their respective
        service endpoints as values.
    :rtype: dict
    """
    # One endpoint map per region, keyed by the region name.
    return {
        region: build_subcloud_endpoint_map(mgmt_ip)
        for region, mgmt_ip in subcloud_mgmt_ips.items()
    }
def build_subcloud_endpoint(ip: str, service: str) -> str:
    """Build a service endpoint for a given IP address.

    :param ip: The IP address for constructing the service endpoint.
    :type ip: str
    :param service: The service of the endpoint
    :type service: str
    :return: The service endpoint URL, or None if the service is unknown.
    :rtype: str
    """
    # No default needed: dict.get already returns None for a missing key.
    endpoint = consts.ENDPOINT_URLS.get(service)
    if endpoint:
        # IPv6 addresses must be bracketed inside a URL.
        formatted_ip = f"[{ip}]" if netaddr.IPAddress(ip).version == 6 else ip
        endpoint = endpoint.format(formatted_ip)
    return endpoint

View File

@ -14,6 +14,7 @@
# under the License. # under the License.
# #
# TODO(nicodemos): Remove this file after all support to patching is removed
from keystoneauth1 import exceptions as keystone_exceptions from keystoneauth1 import exceptions as keystone_exceptions
from oslo_log import log as logging from oslo_log import log as logging
@ -22,7 +23,7 @@ from dccommon.drivers.openstack import patching_v1
from dccommon.drivers.openstack.patching_v1 import PatchingClient from dccommon.drivers.openstack.patching_v1 import PatchingClient
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon.endpoint_cache import build_subcloud_endpoint from dccommon.utils import build_subcloud_endpoint
from dcmanager.common import utils from dcmanager.common import utils
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)

View File

@ -11,7 +11,7 @@ from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.keystone_v3 import KeystoneClient as ks_client from dccommon.drivers.openstack.keystone_v3 import KeystoneClient as ks_client
from dccommon.drivers.openstack import sdk_platform from dccommon.drivers.openstack import sdk_platform
from dccommon.drivers.openstack import software_v1 from dccommon.drivers.openstack import software_v1
from dccommon.endpoint_cache import build_subcloud_endpoint from dccommon.utils import build_subcloud_endpoint
from dccommon.utils import log_subcloud_msg from dccommon.utils import log_subcloud_msg
from dcmanager.common import utils from dcmanager.common import utils

View File

@ -26,8 +26,7 @@ from dccommon.drivers.openstack.fm import FmClient
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon import endpoint_cache from dccommon import endpoint_cache
from dccommon.utils import log_subcloud_msg from dccommon import utils as dccommon_utils
from dccommon.utils import subcloud_has_dcagent
from dcmanager.audit import alarm_aggregation from dcmanager.audit import alarm_aggregation
from dcmanager.audit import base_audit from dcmanager.audit import base_audit
from dcmanager.audit import firmware_audit from dcmanager.audit import firmware_audit
@ -347,7 +346,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
failures = list() failures = list()
availability_data = dict() availability_data = dict()
endpoint_data = dict() endpoint_data = dict()
has_dcagent = subcloud_has_dcagent(subcloud.software_version) has_dcagent = dccommon_utils.subcloud_has_dcagent(subcloud.software_version)
# Set defaults to None and disabled so we will still set disabled # Set defaults to None and disabled so we will still set disabled
# status if we encounter an error. # status if we encounter an error.
@ -369,7 +368,7 @@ class SubcloudAuditWorkerManager(manager.Manager):
dcagent_client = DcagentClient( dcagent_client = DcagentClient(
subcloud_region, subcloud_region,
admin_session, admin_session,
endpoint=endpoint_cache.build_subcloud_endpoint( endpoint=dccommon_utils.build_subcloud_endpoint(
subcloud_management_ip, "dcagent" subcloud_management_ip, "dcagent"
), ),
) )
@ -495,7 +494,9 @@ class SubcloudAuditWorkerManager(manager.Manager):
if avail_to_set == dccommon_consts.AVAILABILITY_OFFLINE: if avail_to_set == dccommon_consts.AVAILABILITY_OFFLINE:
inactive_sg = audit_value.get("inactive_sg") inactive_sg = audit_value.get("inactive_sg")
msg = f"Inactive service groups: {inactive_sg}" msg = f"Inactive service groups: {inactive_sg}"
log_subcloud_msg(LOG.debug, msg, subcloud_name, avail_to_set) dccommon_utils.log_subcloud_msg(
LOG.debug, msg, subcloud_name, avail_to_set
)
alarms = audit_value.get("alarms") alarms = audit_value.get("alarms")
if alarms: if alarms:
self.alarm_aggr.update_alarm_summary(subcloud_name, alarms) self.alarm_aggr.update_alarm_summary(subcloud_name, alarms)

View File

@ -341,11 +341,13 @@ EXTRA_ARGS_SUBJECT = "subject"
EXTRA_ARGS_SYSADMIN_PASSWORD = "sysadmin_password" EXTRA_ARGS_SYSADMIN_PASSWORD = "sysadmin_password"
EXTRA_ARGS_FORCE = "force" EXTRA_ARGS_FORCE = "force"
# TODO(nicodemos): Remove after patching is no longer supported
# extra_args for patching # extra_args for patching
EXTRA_ARGS_UPLOAD_ONLY = "upload-only" EXTRA_ARGS_UPLOAD_ONLY = "upload-only"
EXTRA_ARGS_PATCH_ID = "patch_id" EXTRA_ARGS_PATCH_ID = "patch_id"
# sw_version supported for patching # TODO(nicodemos): Remove after patching is no longer supported
# sw_version supported for patching legacy
PATCHING_SW_VERSION = "22.12" PATCHING_SW_VERSION = "22.12"
# extra_args for software # extra_args for software

View File

@ -145,6 +145,7 @@ def validate_bootstrap_values(payload: dict):
pecan.abort(400, _("external_oam_floating_address required")) pecan.abort(400, _("external_oam_floating_address required"))
# TODO(nicodemos): Change to verify the releases instead of patching
def validate_system_controller_patch_status(operation: str): def validate_system_controller_patch_status(operation: str):
ks_client = get_ks_client() ks_client = get_ks_client()
patching_client = PatchingClient( patching_client = PatchingClient(

View File

@ -45,15 +45,13 @@ import yaml
from dccommon import consts as dccommon_consts from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon.endpoint_cache import EndpointCache from dccommon import endpoint_cache
from dccommon.exceptions import PlaybookExecutionFailed from dccommon.exceptions import PlaybookExecutionFailed
from dccommon.exceptions import SubcloudNotFound from dccommon.exceptions import SubcloudNotFound
from dccommon import kubeoperator from dccommon import kubeoperator
from dccommon.subcloud_enrollment import SubcloudEnrollmentInit from dccommon.subcloud_enrollment import SubcloudEnrollmentInit
from dccommon.subcloud_install import SubcloudInstall from dccommon.subcloud_install import SubcloudInstall
from dccommon.utils import AnsiblePlaybook from dccommon import utils as dccommon_utils
from dccommon.utils import LAST_SW_VERSION_IN_CENTOS
from dccommon.utils import send_subcloud_shutdown_signal
from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
from dcmanager.common import consts from dcmanager.common import consts
from dcmanager.common.consts import INVENTORY_FILE_POSTFIX from dcmanager.common.consts import INVENTORY_FILE_POSTFIX
@ -107,7 +105,7 @@ ANSIBLE_VALIDATE_KEYSTONE_PASSWORD_SCRIPT = (
USERS_TO_REPLICATE = [ USERS_TO_REPLICATE = [
"sysinv", "sysinv",
"patching", "patching", # TODO(nicodemos): Remove after patching is removed
"usm", "usm",
"vim", "vim",
"mtce", "mtce",
@ -168,15 +166,6 @@ MAX_PARALLEL_SUBCLOUD_BACKUP_DELETE = 250
MAX_PARALLEL_SUBCLOUD_BACKUP_RESTORE = 100 MAX_PARALLEL_SUBCLOUD_BACKUP_RESTORE = 100
CENTRAL_BACKUP_DIR = "/opt/dc-vault/backups" CENTRAL_BACKUP_DIR = "/opt/dc-vault/backups"
ENDPOINT_URLS = {
dccommon_consts.ENDPOINT_TYPE_PLATFORM: "https://{}:6386/v1",
dccommon_consts.ENDPOINT_TYPE_IDENTITY: "https://{}:5001/v3",
dccommon_consts.ENDPOINT_TYPE_PATCHING: "https://{}:5492",
dccommon_consts.ENDPOINT_TYPE_FM: "https://{}:18003",
dccommon_consts.ENDPOINT_TYPE_NFV: "https://{}:4546",
dccommon_consts.ENDPOINT_TYPE_SOFTWARE: "https://{}:5498",
}
# Values for the exponential backoff retry to get subcloud's # Values for the exponential backoff retry to get subcloud's
# certificate secret. # certificate secret.
MAX_ATTEMPTS_TO_GET_INTERMEDIATE_CA_CERT = 15 MAX_ATTEMPTS_TO_GET_INTERMEDIATE_CA_CERT = 15
@ -564,7 +553,7 @@ class SubcloudManager(manager.Manager):
# TODO(yuxing) Remove the validate_keystone_passwords_script when end # TODO(yuxing) Remove the validate_keystone_passwords_script when end
# the support of rehoming a subcloud with a software version below 22.12 # the support of rehoming a subcloud with a software version below 22.12
if software_version <= LAST_SW_VERSION_IN_CENTOS: if software_version <= dccommon_utils.LAST_SW_VERSION_IN_CENTOS:
extra_vars += ( extra_vars += (
" validate_keystone_passwords_script='%s'" " validate_keystone_passwords_script='%s'"
% ANSIBLE_VALIDATE_KEYSTONE_PASSWORD_SCRIPT % ANSIBLE_VALIDATE_KEYSTONE_PASSWORD_SCRIPT
@ -999,7 +988,7 @@ class SubcloudManager(manager.Manager):
# Run the rehome-subcloud playbook # Run the rehome-subcloud playbook
try: try:
ansible = AnsiblePlaybook(subcloud.name) ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
ansible.run_playbook(log_file, rehome_command) ansible.run_playbook(log_file, rehome_command)
except PlaybookExecutionFailed: except PlaybookExecutionFailed:
msg = ( msg = (
@ -1443,7 +1432,7 @@ class SubcloudManager(manager.Manager):
subcloud = utils.update_abort_status(context, subcloud_id, deploy_status) subcloud = utils.update_abort_status(context, subcloud_id, deploy_status)
try: try:
ansible = AnsiblePlaybook(subcloud.name) ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
aborted = ansible.run_abort() aborted = ansible.run_abort()
if not aborted: if not aborted:
LOG.warning( LOG.warning(
@ -1455,7 +1444,7 @@ class SubcloudManager(manager.Manager):
if subcloud.deploy_status == consts.DEPLOY_STATE_ABORTING_INSTALL: if subcloud.deploy_status == consts.DEPLOY_STATE_ABORTING_INSTALL:
# Send shutdown signal to subcloud # Send shutdown signal to subcloud
send_subcloud_shutdown_signal(subcloud.name) dccommon_utils.send_subcloud_shutdown_signal(subcloud.name)
except Exception as ex: except Exception as ex:
LOG.error( LOG.error(
"Subcloud deploy abort failed for subcloud %s: %s" "Subcloud deploy abort failed for subcloud %s: %s"
@ -1537,7 +1526,7 @@ class SubcloudManager(manager.Manager):
# TODO(Yuxing) remove replicating the smapi user when end the support # TODO(Yuxing) remove replicating the smapi user when end the support
# of rehoming a subcloud with a software version below 22.12 # of rehoming a subcloud with a software version below 22.12
if subcloud.software_version <= LAST_SW_VERSION_IN_CENTOS: if subcloud.software_version <= dccommon_utils.LAST_SW_VERSION_IN_CENTOS:
payload["users"]["smapi"] = str( payload["users"]["smapi"] = str(
keyring.get_password("smapi", dccommon_consts.SERVICES_USER_NAME) keyring.get_password("smapi", dccommon_consts.SERVICES_USER_NAME)
) )
@ -1693,7 +1682,9 @@ class SubcloudManager(manager.Manager):
# TODO(Yuxing) remove replicating the smapi user when end the support # TODO(Yuxing) remove replicating the smapi user when end the support
# of rehoming a subcloud with a software version below 22.12 # of rehoming a subcloud with a software version below 22.12
if rehoming and subcloud.software_version <= LAST_SW_VERSION_IN_CENTOS: if rehoming and (
subcloud.software_version <= dccommon_utils.LAST_SW_VERSION_IN_CENTOS
):
payload["users"]["smapi"] = str( payload["users"]["smapi"] = str(
keyring.get_password("smapi", dccommon_consts.SERVICES_USER_NAME) keyring.get_password("smapi", dccommon_consts.SERVICES_USER_NAME)
) )
@ -2579,7 +2570,7 @@ class SubcloudManager(manager.Manager):
# Run the subcloud backup playbook # Run the subcloud backup playbook
try: try:
ansible = AnsiblePlaybook(subcloud.name) ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
ansible.run_playbook(log_file, backup_command) ansible.run_playbook(log_file, backup_command)
# Decide between complete-local or complete-central # Decide between complete-local or complete-central
@ -2610,7 +2601,7 @@ class SubcloudManager(manager.Manager):
try: try:
# Run the subcloud backup delete playbook # Run the subcloud backup delete playbook
ansible = AnsiblePlaybook(subcloud.name) ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
ansible.run_playbook(log_file, delete_command) ansible.run_playbook(log_file, delete_command)
# Set backup status to unknown after delete, since most recent backup may # Set backup status to unknown after delete, since most recent backup may
@ -2657,7 +2648,7 @@ class SubcloudManager(manager.Manager):
) )
# Run the subcloud backup restore playbook # Run the subcloud backup restore playbook
try: try:
ansible = AnsiblePlaybook(subcloud.name) ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
ansible.run_playbook( ansible.run_playbook(
log_file, restore_command, timeout=CONF.playbook_timeout log_file, restore_command, timeout=CONF.playbook_timeout
) )
@ -2792,7 +2783,7 @@ class SubcloudManager(manager.Manager):
) )
try: try:
ansible = AnsiblePlaybook(subcloud.name) ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
aborted = ansible.run_playbook(log_file, config_command) aborted = ansible.run_playbook(log_file, config_command)
except PlaybookExecutionFailed: except PlaybookExecutionFailed:
msg = utils.find_ansible_error_msg( msg = utils.find_ansible_error_msg(
@ -2900,7 +2891,7 @@ class SubcloudManager(manager.Manager):
LOG.info(f"Starting enroll of subcloud {subcloud.name}") LOG.info(f"Starting enroll of subcloud {subcloud.name}")
try: try:
ansible = AnsiblePlaybook(subcloud.name) ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
ansible.run_playbook(log_file, enroll_command) ansible.run_playbook(log_file, enroll_command)
except PlaybookExecutionFailed: except PlaybookExecutionFailed:
msg = utils.find_ansible_error_msg( msg = utils.find_ansible_error_msg(
@ -2941,7 +2932,7 @@ class SubcloudManager(manager.Manager):
# Run the ansible subcloud bootstrap playbook # Run the ansible subcloud bootstrap playbook
LOG.info("Starting bootstrap of %s" % subcloud.name) LOG.info("Starting bootstrap of %s" % subcloud.name)
try: try:
ansible = AnsiblePlaybook(subcloud.name) ansible = dccommon_utils.AnsiblePlaybook(subcloud.name)
aborted = ansible.run_playbook(log_file, bootstrap_command) aborted = ansible.run_playbook(log_file, bootstrap_command)
except PlaybookExecutionFailed: except PlaybookExecutionFailed:
msg = utils.find_ansible_error_msg( msg = utils.find_ansible_error_msg(
@ -3776,7 +3767,7 @@ class SubcloudManager(manager.Manager):
) )
subcloud_id = subcloud.id subcloud_id = subcloud.id
try: try:
ansible = AnsiblePlaybook(subcloud_name) ansible = dccommon_utils.AnsiblePlaybook(subcloud_name)
ansible.run_playbook(log_file, update_command) ansible.run_playbook(log_file, update_command)
utils.delete_subcloud_inventory(overrides_file) utils.delete_subcloud_inventory(overrides_file)
except PlaybookExecutionFailed: except PlaybookExecutionFailed:
@ -3895,20 +3886,11 @@ class SubcloudManager(manager.Manager):
def _update_services_endpoint(self, context, payload, subcloud_region, m_ks_client): def _update_services_endpoint(self, context, payload, subcloud_region, m_ks_client):
ip = utils.get_primary_management_start_address(payload) ip = utils.get_primary_management_start_address(payload)
formatted_ip = f"[{ip}]" if netaddr.IPAddress(ip).version == 6 else ip services_endpoints = dccommon_utils.build_subcloud_endpoint_map(ip)
services_endpoints = {
"keystone": "https://{}:5001/v3".format(formatted_ip),
"sysinv": "https://{}:6386/v1".format(formatted_ip),
"fm": "https://{}:18003".format(formatted_ip),
"patching": "https://{}:5492".format(formatted_ip),
"vim": "https://{}:4546".format(formatted_ip),
"usm": "https://{}:5498".format(formatted_ip),
}
LOG.info( LOG.info(
"Update services endpoint to %s in subcloud region %s" "Update services endpoint to %s in subcloud region %s"
% (formatted_ip, subcloud_region) % (ip, subcloud_region)
) )
# Update service URLs in subcloud endpoint cache # Update service URLs in subcloud endpoint cache
self.audit_rpc_client.trigger_subcloud_endpoints_update( self.audit_rpc_client.trigger_subcloud_endpoints_update(
@ -3925,7 +3907,7 @@ class SubcloudManager(manager.Manager):
) )
# Update dcmanager endpoint cache # Update dcmanager endpoint cache
EndpointCache.update_master_service_endpoint_region( endpoint_cache.EndpointCache.update_master_service_endpoint_region(
subcloud_region, services_endpoints subcloud_region, services_endpoints
) )

View File

@ -15,6 +15,8 @@
# under the License. # under the License.
# #
# TODO(nicodemos): Remove this file and all patch states after all support
# to patching is removed
from oslo_log import log as logging from oslo_log import log as logging
from dccommon.drivers.openstack import vim from dccommon.drivers.openstack import vim

View File

@ -10,6 +10,7 @@ It defines methods used in dcmanager orchestrator's to handle the strategy
by its type. by its type.
""" """
# TODO(nicodemos): Remove this file after all support to patching is removed
from oslo_log import log as logging from oslo_log import log as logging
from dccommon import consts as dccommon_consts from dccommon import consts as dccommon_consts

View File

@ -103,19 +103,36 @@ FAKE_PROJECTS = [
class FakeService(object): class FakeService(object):
def __init__(self, type, id): def __init__(self, name, type, id):
self.name = name
self.type = type self.type = type
self.id = id self.id = id
FAKE_SERVICES = [ FAKE_SERVICES = [
FakeService(dccommon_consts.ENDPOINT_TYPE_PLATFORM, 1), FakeService(
FakeService(dccommon_consts.ENDPOINT_TYPE_IDENTITY, 2), dccommon_consts.ENDPOINT_NAME_SYSINV, dccommon_consts.ENDPOINT_TYPE_PLATFORM, 1
FakeService(dccommon_consts.ENDPOINT_TYPE_PATCHING, 3), ),
FakeService(dccommon_consts.ENDPOINT_TYPE_FM, 4), FakeService(
FakeService(dccommon_consts.ENDPOINT_TYPE_NFV, 5), dccommon_consts.ENDPOINT_NAME_KEYSTONE,
FakeService(dccommon_consts.ENDPOINT_TYPE_DC_CERT, 6), dccommon_consts.ENDPOINT_TYPE_IDENTITY,
FakeService(dccommon_consts.ENDPOINT_TYPE_SOFTWARE, 7), 2,
),
FakeService(
dccommon_consts.ENDPOINT_TYPE_PATCHING,
dccommon_consts.ENDPOINT_TYPE_PATCHING,
3,
),
FakeService(dccommon_consts.ENDPOINT_NAME_FM, dccommon_consts.ENDPOINT_TYPE_FM, 4),
FakeService(
dccommon_consts.ENDPOINT_NAME_VIM, dccommon_consts.ENDPOINT_TYPE_NFV, 5
),
FakeService(
dccommon_consts.ENDPOINT_TYPE_DC_CERT, dccommon_consts.ENDPOINT_TYPE_DC_CERT, 6
),
FakeService(
dccommon_consts.ENDPOINT_NAME_USM, dccommon_consts.ENDPOINT_TYPE_SOFTWARE, 7
),
] ]

View File

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
# TODO(nicodemos): Remove this file after all support to patching is removed
import glob import glob
import json import json
@ -36,7 +37,6 @@ from dcorch.api.proxy.common.service import Middleware
from dcorch.api.proxy.common import utils as proxy_utils from dcorch.api.proxy.common import utils as proxy_utils
from dcorch.common import context from dcorch.common import context
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
CONF = cfg.CONF CONF = cfg.CONF

View File

@ -26,11 +26,10 @@ from oslo_utils import timeutils
from dccommon import consts as dccommon_consts from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import OpenStackDriver from dccommon.drivers.openstack.sdk_platform import OpenStackDriver
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon.endpoint_cache import build_subcloud_endpoint
from dccommon import exceptions as dccommon_exceptions from dccommon import exceptions as dccommon_exceptions
from dccommon.utils import build_subcloud_endpoint
from dcorch.common import consts from dcorch.common import consts
from dcorch.common import exceptions from dcorch.common import exceptions
from dcorch.engine.fernet_key_manager import FERNET_REPO_MASTER_ID from dcorch.engine.fernet_key_manager import FERNET_REPO_MASTER_ID
from dcorch.engine.fernet_key_manager import FernetKeyManager from dcorch.engine.fernet_key_manager import FernetKeyManager
from dcorch.engine.sync_thread import AUDIT_RESOURCE_EXTRA from dcorch.engine.sync_thread import AUDIT_RESOURCE_EXTRA

View File

@ -24,8 +24,8 @@ from oslo_utils import timeutils
from dccommon import consts as dccommon_consts from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack import sdk_platform as sdk from dccommon.drivers.openstack import sdk_platform as sdk
from dccommon.endpoint_cache import build_subcloud_endpoint
from dccommon.endpoint_cache import EndpointCache from dccommon.endpoint_cache import EndpointCache
from dccommon.utils import build_subcloud_endpoint
from dcdbsync.dbsyncclient import client as dbsyncclient from dcdbsync.dbsyncclient import client as dbsyncclient
from dcmanager.rpc import client as dcmanager_rpc_client from dcmanager.rpc import client as dcmanager_rpc_client
from dcorch.common import consts from dcorch.common import consts