From 16c5d9e21480558e2e3e3aa5117a966fec455691 Mon Sep 17 00:00:00 2001 From: dkumbhar Date: Sat, 24 Oct 2020 14:49:30 +0000 Subject: [PATCH] lb octavia backend validation, mdproxy validation, rbac with lb Change-Id: I4de9e15570cb6981612bba0dd6b4247bb8dd252d --- .../lib/traffic_manager.py | 2 +- .../services/nsxv_client.py | 87 ++ .../services/openstack_network_clients.py | 23 +- .../scenario/test_octavia_loadbalancers.py | 927 +++++++++++++++++- .../tests/scenario/test_new_case_coverage.py | 6 + 5 files changed, 1022 insertions(+), 23 deletions(-) diff --git a/vmware_nsx_tempest_plugin/lib/traffic_manager.py b/vmware_nsx_tempest_plugin/lib/traffic_manager.py index 9526034..50de7d9 100644 --- a/vmware_nsx_tempest_plugin/lib/traffic_manager.py +++ b/vmware_nsx_tempest_plugin/lib/traffic_manager.py @@ -271,7 +271,7 @@ class TrafficManager(appliance_manager.ApplianceManager): start_server = ('while true; do ' 'sudo nc -ll -p %(port)s -e sh /tmp/%(script)s; ' 'done > /dev/null &') - cmd = start_server % {'port': constants.HTTP_PORT, + cmd = start_server % {'port': protocol_port, 'script': 'script'} ssh_client.exec_command(cmd) diff --git a/vmware_nsx_tempest_plugin/services/nsxv_client.py b/vmware_nsx_tempest_plugin/services/nsxv_client.py index 2242d54..61d4345 100644 --- a/vmware_nsx_tempest_plugin/services/nsxv_client.py +++ b/vmware_nsx_tempest_plugin/services/nsxv_client.py @@ -441,3 +441,90 @@ class VSMClient(object): return True else: return False + + def check_cert_at_backend(self, lb_id=None, cert_conent=None): + lbaas_edge_name = 'lbaas-' + lb_id[:-6] + edge_id = self.get_lbaas_edge_id(lbaas_edge_name) + self.__set_api_version('2.0') + endpoint = '/services/truststore/certificate/scope/%s' % edge_id + self.__set_endpoint(endpoint) + response = self.get() + certs = response.json()['certificates'] + for cert in certs: + if cert['pemEncoding'] == cert_conent: + return True + else: + return False + + def get_lbaas_edge_id(self, edge_name): + edges = self.get_all_edges() + edge_ids = [e['id'] for e in edges if edge_name in e['name']] + if len(edge_ids) == 0: + LOG.debug('Edge %s NOT found!' % edge_name) + edge = None + else: + edge = edge_ids[0] + LOG.debug('Found edge: %s' % edge) + return edge + + def get_lbaas_config_from_edge(self, lbaas_id): + lbaas_edge_name = 'lbaas-' + lbaas_id[:-6] + edge_id = self.get_lbaas_edge_id(lbaas_edge_name) + if edge_id is not None: + self.__set_api_version('4.0') + self.__set_endpoint('/edges/%s/loadbalancer/config' % edge_id) + response = self.get() + lbaas_config = response.json() + else: + LOG.debug('Edge NOT found with lb_id %s or deleted!' 
% lbaas_id) + lbaas_config = [] + return lbaas_config + + def verify_lbaas_on_edge(self, lbaas_id, listener=None, + pool=None, member=None, + hmonitor=None, cleanup=[]): + lbaas_config = self.get_lbaas_config_from_edge(lbaas_id) + if hmonitor: + hms_vsm = [hm['id'] for hm in lbaas_config['monitor']] + if 'hm' in cleanup: + self.assertFalse(hmonitor['id'] in hms_vsm) + else: + self.assertTrue(hmonitor['id'] in hms_vsm) + if pool: + pool_vsm = \ + [(p['name'], p['algorithm']) for p in lbaas_config['pool']] + if 'pool' in cleanup: + self.assertFalse(('pool_' + pool['id'], + 'round-robin') in pool_vsm) + else: + self.assertTrue(('pool_' + pool['id'], + 'round-robin') in pool_vsm) + if listener: + listener_vsm = \ + [lr['name'] for lr in lbaas_config['virtualServer']] + if 'listener' in cleanup: + self.assertFalse('vip_' + listener['id'] in listener_vsm) + else: + self.assertTrue('vip_' + listener['id'] in listener_vsm) + + def verify_member_status_of_md_proxy_edges(self): + md_edge_name = 'metadata' + edge_id = self.get_lbaas_edge_id(md_edge_name) + result = False + up_members = 0 + if edge_id is not None: + self.__set_api_version('4.0') + self.__set_endpoint('/edges/%s/loadbalancer/statistics' % edge_id) + response = self.get() + members = response.json()['pool'][0]['member'] + if members: + for member in members: + if member['status'] == 'UP': + up_members += 1 + if len(members) == up_members: + result = True + else: + LOG.debug('pool are not present on required edge') + else: + LOG.debug('Edge NOT found with mdproxy or deleted!') + return result diff --git a/vmware_nsx_tempest_plugin/services/openstack_network_clients.py b/vmware_nsx_tempest_plugin/services/openstack_network_clients.py index f3a00a0..8beb839 100644 --- a/vmware_nsx_tempest_plugin/services/openstack_network_clients.py +++ b/vmware_nsx_tempest_plugin/services/openstack_network_clients.py @@ -752,8 +752,12 @@ class OctaviaListenersClient(base.BaseNetworkClient): uri = self.resource_object_path % listener_id return self.delete_resource(uri) - def update_octavia_listener(self, listener_id, default_pool_id): - post_data = {"listener": {"default_pool_id": default_pool_id}} + def update_octavia_listener(self, listener_id, default_pool_id=None, + listener_data=None): + if default_pool_id: + post_data = {"listener": {"default_pool_id": default_pool_id}} + if listener_data: + post_data = {"listener": listener_data} uri = self.resource_object_path % listener_id return self.update_resource(uri, post_data) @@ -784,6 +788,11 @@ class OctaviaPoolsClient(base.BaseNetworkClient): uri = self.resource_base_path return self.list_resources(uri, **filters) + def update_octavia_pool(self, pool_id, pool_data): + post_data = {"pool": pool_data} + uri = self.resource_object_path % pool_id + return self.update_resource(uri, post_data) + class OctaviaHealthMonitorClient(base.BaseNetworkClient): """ @@ -807,6 +816,11 @@ class OctaviaHealthMonitorClient(base.BaseNetworkClient): uri = self.resource_object_path % hm_id return self.delete_resource(uri) + def update_octavia_hm(self, hm_id, hm_data): + post_data = {"healthmonitor": hm_data} + uri = self.resource_object_path % hm_id + return self.update_resource(uri, post_data) + class OctaviaMembersClient(base.BaseNetworkClient): """ @@ -829,6 +843,11 @@ class OctaviaMembersClient(base.BaseNetworkClient): uri = self.resource_object_path % (pool_id, member_id) return self.delete_resource(uri) + def update_octavia_member(self, pool_id, member_id, member_data): + post_data = {'member': member_data} + uri = 
self.resource_object_path % (pool_id, member_id) + return self.update_resource(uri, post_data) + class OctaviaL7PolicyClient(base.BaseNetworkClient): """ diff --git a/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_octavia_loadbalancers.py b/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_octavia_loadbalancers.py index 605734f..e558454 100644 --- a/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_octavia_loadbalancers.py +++ b/vmware_nsx_tempest_plugin/tests/nsxv/scenario/test_octavia_loadbalancers.py @@ -12,6 +12,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import re from tempest import config from tempest.lib.common.utils import data_utils @@ -21,7 +22,7 @@ from tempest import test from vmware_nsx_tempest_plugin.common import constants from vmware_nsx_tempest_plugin.lib import feature_manager - +from vmware_nsx_tempest_plugin.services import nsxv_client LOG = constants.log.getLogger(__name__) CONF = config.CONF @@ -58,6 +59,11 @@ class OctaviaRoundRobin(feature_manager.FeatureManager): @classmethod def resource_setup(cls): super(OctaviaRoundRobin, cls).resource_setup() + if CONF.network.backend == "nsxv": + manager_ip = re.search(r"(\d{1,3}\.){3}\d{1,3}", + CONF.nsxv.manager_uri).group(0) + cls.vsm = nsxv_client.VSMClient( + manager_ip, CONF.nsxv.user, CONF.nsxv.password) @classmethod def setup_credentials(cls): @@ -86,15 +92,210 @@ class OctaviaRoundRobin(feature_manager.FeatureManager): if self.loadbalancer: LOG.debug("tearDown lbass") lb_id = self.loadbalancer['id'] - self.delete_octavia_lb_resources(lb_id) - + lbs = self.octavia_admin_client.\ + list_octavia_load_balancers()['loadbalancers'] + lb_names = [lb['name'] for lb in lbs] + if self.loadbalancer['name'] in lb_names: + self.delete_octavia_lb_resources(lb_id) + else: + LOG.debug("tearDown skipped as lb already deleted") LOG.debug("tearDown lbaas exiting...") super(OctaviaRoundRobin, self).tearDown() + def _assign_floating_ip_to_vip(self): + vip_port = self.loadbalancer['vip_port_id'] + sg_id = self.sg['id'] + self.cmgr_adm.ports_client.update_port(vip_port, + security_groups=[sg_id]) + fip_client = self.cmgr_adm.floating_ips_client + vip_port = self.loadbalancer['vip_port_id'] + vip_fip = self.create_floatingip(self.loadbalancer, client=fip_client, + port_id=vip_port) + self.vip_ip_address = vip_fip['floating_ip_address'] + + def _find_listener(self, vsm_listeners, listener): + port = listener['protocol_port'] + if listener['protocol'] == 'TCP': + proto = 'tcp' + if listener['protocol'] == 'HTTP': + proto = 'http' + if listener['protocol'] == 'HTTPS': + proto = 'https' + for l_1 in vsm_listeners: + if (l_1['name'], l_1['protocol'], l_1['port']) == \ + ('vip_' + listener['id'], proto, str(port)): + return l_1 + else: + return False + + def _find_application_profile(self, l_1, lbaas_config, sp): + vsm_app_profiles = lbaas_config['applicationProfile'] + for app in vsm_app_profiles: + if l_1['applicationProfileId'] == app['applicationProfileId']: + try: + if app['persistence']['method'] == sp: + return app + except Exception: + return False + else: + return False + + def _find_pool(self, l1, lbaas_config, pool): + pool_vsm = lbaas_config['pool'] + if pool['lb_algorithm'] == 'ROUND_ROBIN': + lb_algo = 'round-robin' + if pool['lb_algorithm'] == 'LEAST_CONNECTIONS': + lb_algo = 'leastconn' + if pool['lb_algorithm'] == 'SOURCE_IP': + lb_algo = 'ip-hash' + for p in pool_vsm: + try: + if 
l1['defaultPoolId'] == p['poolId'] and \ + ('pool_' + pool['id'], lb_algo) == \ + (p['name'], p['algorithm']): + return p + except Exception: + return False + else: + return False + + def _verify_lbaas_on_edge(self, lb_id, listener=None, pool=None, + member=None, hmonitor=None, + session_persistence=None, cleanup=[]): + if 'ALL' in cleanup: + cleanup = ['hm', 'member', 'pool', 'listener'] + lbaas_config = self.vsm.get_lbaas_config_from_edge(lb_id) + if lbaas_config: + if hmonitor: + lbaas_hm_config = lbaas_config['monitor'] + if hmonitor['type'] == 'PING': + h_type = 'icmp' + if hmonitor['type'] == 'TCP': + h_type = 'tcp' + if hmonitor['type'] == 'HTTP': + h_type = 'http' + hms_vsm = [(hm['name'], hm['type']) for hm in lbaas_hm_config] + if 'hm' in cleanup: + msg = hmonitor['id'] + ' hm isnt deleted in backend' + self.assertFalse((hmonitor['id'], h_type) in hms_vsm) + else: + msg = hmonitor['id'] + ' hm is delete or not present' + self.assertTrue((hmonitor['id'], h_type) in hms_vsm) + if pool: + l1 = self.\ + _find_listener(vsm_listeners=lbaas_config['virtualServer'], + listener=listener) + found_pool = self._find_pool(l1, lbaas_config, pool) + if 'pool' in cleanup: + msg = pool['id'] + ' pool isnt deleted in backend' + self.assertFalse(found_pool) + else: + msg = pool['id'] + ' pool isnt with listener or deleted' + self.assertTrue(found_pool, msg) + if listener: + l1 = self.\ + _find_listener(vsm_listeners=lbaas_config['virtualServer'], + listener=listener) + if 'listener' in cleanup: + msg = listener['id'] + ' listener isnt deleted in backend' + self.assertFalse(l1) + else: + msg = listener['id'] + ' listener is deleted or not exist' + self.assertTrue(l1) + if session_persistence: + sp = None + if session_persistence == 'SOURCE_IP': + sp = 'sourceip' + app = self._find_application_profile(l1, lbaas_config, sp) + if session_persistence != "None": + msg = 'session persistence value is not in backend' + self.assertTrue(app, msg) + else: + msg = ' session persistence value is not set as None' + self.assertFalse(app) + if member: + pool_name = 'pool_' + pool['id'] + for p in lbaas_config['pool']: + if pool_name == p['name']: + members_vsm = \ + [(m['name'], m['ipAddress']) for m in p['member']] + address = member['address'] + if 'member' in cleanup: + self.assertFalse(('member-' + member['id'], + address) in members_vsm) + else: + self.assertTrue(('member-' + member['id'], + address) in members_vsm) + else: + LOG.debug('lbaas_config is not present,' + 'either edge deleted to config is deleted') + + def _update_lb_components(self, lb_id, hm, member, pool, listener): + # Update healthmonitor & verify + hm_data = {'name': 'new_hm', 'timeout': 20} + self.healthmonitor = self.octavia_hm_client.\ + update_octavia_hm(hm['id'], + hm_data)['healthmonitor'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=listener, + pool=pool, hmonitor=self.healthmonitor, + member=member, cleanup=[]) + # Update members & revert oneof member & verify + member_data = {'name': 'member0_new', 'weight': 100} + member_id = member['id'] + member = self.octavia_admin_members_client.\ + update_octavia_member(pool['id'], member_id, + member_data)['member'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=listener, + pool=pool, + member=member, + cleanup=[]) + # Update pool with name & lb_algo, revert same change & verify + pool_data = 
{'name': 'newPool', 'lb_algorithm': 'LEAST_CONNECTIONS', + 'session_persistence': None} + self.pool = self.octavia_admin_pools_client.\ + update_octavia_pool(pool['id'], pool_data)['pool'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=listener, + pool=self.pool, + member=member, + session_persistence="None", + cleanup=[]) + # Update listener with another pool & verify + self.pool1 = self.octavia_admin_pools_client.\ + create_octavia_pool(loadbalancer_id=lb_id, + lb_algorithm='ROUND_ROBIN', + protocol='TCP', + name='pool2', + session_persistence=None)['pool'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + listener_data = {'name': 'new_listner', + 'default_pool_id': self.pool1['id']} + self.listener = self.octavia_admin_listener_client.\ + update_octavia_listener(listener['id'], + listener_data=listener_data)['listener'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=self.listener, + pool=self.pool1, + cleanup=[]) + listener_data = {'name': 'new_listner', + 'default_pool_id': pool['id']} + self.listener = self.octavia_admin_listener_client.\ + update_octavia_listener(listener['id'], + listener_data=listener_data)['listener'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.octavia_admin_pools_client.\ + delete_octavia_pool(self.pool1['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + def deploy_octavia_topology(self, no_of_servers=2, image_id=None): router_name = data_utils.rand_name('router_lbaas') - kwargs = {'name': router_name, - 'router_type': 'exclusive', + kwargs = {'name': router_name, 'router_type': 'exclusive', 'external_gateway_info': {"network_id": CONF.network.public_network_id}} router_lbaas = self.cmgr_adm.routers_client.create_router(**kwargs) @@ -115,6 +316,9 @@ class OctaviaRoundRobin(feature_manager.FeatureManager): lbaas_rules = [dict(direction='ingress', protocol='tcp', port_range_min=constants.HTTP_PORT, port_range_max=constants.HTTP_PORT, ), + dict(direction='ingress', protocol='tcp', + port_range_min=1212, + port_range_max=1212, ), dict(direction='ingress', protocol='tcp', port_range_min=443, port_range_max=443, )] t_id = network_lbaas_1['tenant_id'] @@ -146,6 +350,151 @@ class OctaviaRoundRobin(feature_manager.FeatureManager): return dict(router=router_lbaas, subnet=subnet_lbaas, network=network_lbaas_1) + def deploy_octavia_topology_with_multi_network(self, no_of_servers=2, + image_id=None): + router_name = data_utils.rand_name('router_lbaas') + kwargs = {'name': router_name, + 'router_type': 'exclusive', + 'external_gateway_info': + {"network_id": CONF.network.public_network_id}} + router_lbaas = self.cmgr_adm.routers_client.create_router(**kwargs) + self.addCleanup(test_utils.call_and_ignore_notfound_exc, + self.routers_client.delete_router, + router_lbaas['router']['id']) + networks_client = self.cmgr_adm.networks_client + name = "network_lbaas_1" + network_lbaas_1 = self.\ + create_topology_network(name, + networks_client=networks_client) + name = "network_lbaas_2" + network_lbaas_2 = self.\ + create_topology_network(name, + networks_client=networks_client) + sec_rule_client = self.cmgr_adm.security_group_rules_client + sec_client = self.cmgr_adm.security_groups_client + kwargs = dict(tenant_id=network_lbaas_1['tenant_id'], + security_group_rules_client=sec_rule_client, + 
security_groups_client=sec_client) + self.sg = self.create_topology_security_group(**kwargs) + lbaas_rules = [dict(direction='ingress', protocol='tcp', + port_range_min=constants.HTTP_PORT, + port_range_max=constants.HTTP_PORT, ), + dict(direction='ingress', protocol='tcp', + port_range_min=443, port_range_max=443, )] + t_id = network_lbaas_1['tenant_id'] + for rule in lbaas_rules: + self.add_security_group_rule(self.sg, rule, + secclient=sec_client, + ruleclient=sec_rule_client, + tenant_id=t_id) + body = {"network_id": network_lbaas_1['id'], + "allocation_pools": [{"start": "2.0.0.2", "end": "2.0.0.254"}], + "ip_version": 4, "cidr": "2.0.0.0/24"} + subnet_client = self.cmgr_adm.subnets_client + subnet_lbaas1 = subnet_client.create_subnet(**body) + self.addCleanup(test_utils.call_and_ignore_notfound_exc, + subnet_client.delete_subnet, + subnet_lbaas1['subnet']['id']) + body = {"network_id": network_lbaas_2['id'], + "allocation_pools": [{"start": "3.0.0.2", "end": "3.0.0.254"}], + "ip_version": 4, "cidr": "3.0.0.0/24"} + subnet_client = self.cmgr_adm.subnets_client + subnet_lbaas2 = subnet_client.create_subnet(**body) + self.addCleanup(test_utils.call_and_ignore_notfound_exc, + subnet_client.delete_subnet, + subnet_lbaas2['subnet']['id']) + self.cmgr_adm.routers_client.\ + add_router_interface(router_lbaas['router']['id'], + subnet_id=subnet_lbaas1['subnet']['id']) + self.addCleanup(test_utils.call_and_ignore_notfound_exc, + self.cmgr_adm.routers_client.remove_router_interface, + router_lbaas['router']['id'], + subnet_id=subnet_lbaas1['subnet']['id']) + self.cmgr_adm.routers_client.\ + add_router_interface(router_lbaas['router']['id'], + subnet_id=subnet_lbaas2['subnet']['id']) + self.addCleanup(test_utils.call_and_ignore_notfound_exc, + self.cmgr_adm.routers_client.remove_router_interface, + router_lbaas['router']['id'], + subnet_id=subnet_lbaas2['subnet']['id']) + for instance in range(0, no_of_servers): + self.create_topology_instance( + "server_lbaas1_%s" % instance, [network_lbaas_1], + security_groups=[{'name': self.sg['name']}], + image_id=image_id, clients=self.cmgr_adm) + self.topology_servers1 = self.topology_servers + for instance in range(0, no_of_servers): + self.create_topology_instance( + "server_lbaas2_%s" % instance, [network_lbaas_2], + security_groups=[{'name': self.sg['name']}], + image_id=image_id, clients=self.cmgr_adm) + self.topology_servers2 = self.topology_servers + return dict(router=router_lbaas, subnet1=subnet_lbaas1, + subnet2=subnet_lbaas2, network1=network_lbaas_1, + network2=network_lbaas_2) + + def _create_multi_listener_topology(self, lb_id=None, lb_name=None): + self.listener_1 = self.octavia_admin_listener_client.\ + create_octavia_listener(loadbalancer_id=lb_id, + protocol='HTTP', + protocol_port='80', + allowed_cidrs=None, + name=lb_name + 'listener_1')['listener'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.listener_2 = self.octavia_admin_listener_client.\ + create_octavia_listener(loadbalancer_id=lb_id, + protocol='TCP', + protocol_port='1212', + allowed_cidrs=None, + name=lb_name + 'listener_2')['listener'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.listener_3 = self.octavia_admin_listener_client.\ + create_octavia_listener(loadbalancer_id=lb_id, + protocol='HTTPS', + protocol_port='443', + allowed_cidrs=None, + name=lb_name + 'listener_3')['listener'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.pool_1 = self.octavia_admin_pools_client.\ + 
create_octavia_pool(listener_id=self.listener_1['id'], + lb_algorithm='ROUND_ROBIN', + protocol='HTTP', + name=lb_name + 'pool_1', + session_persistence=None)['pool'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.pool_2 = self.octavia_admin_pools_client.\ + create_octavia_pool(listener_id=self.listener_2['id'], + lb_algorithm='LEAST_CONNECTIONS', + protocol='TCP', + name=lb_name + 'pool_2', + session_persistence=None)['pool'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.pool_3 = self.octavia_admin_pools_client.\ + create_octavia_pool(listener_id=self.listener_3['id'], + lb_algorithm='SOURCE_IP', + protocol='TCP', + name=lb_name + 'pool_3', + session_persistence=None)['pool'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + pool_1_id = self.pool_1['id'] + pool_2_id = self.pool_2['id'] + pool_3_id = self.pool_3['id'] + self.healthmonitor_1 = self.octavia_hm_client.\ + create_octavia_hm(pool_id=pool_1_id, type='PING', delay=2, + timeout=10, max_retries=5, + name=lb_name)['healthmonitor'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.healthmonitor_2 = self.octavia_hm_client.\ + create_octavia_hm(pool_id=pool_2_id, type='TCP', delay=2, + timeout=10, max_retries=5, + name=lb_name)['healthmonitor'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.healthmonitor_3 = self.octavia_hm_client.\ + create_octavia_hm(pool_id=pool_3_id, type='HTTP', delay=2, + timeout=10, max_retries=5, + name=lb_name)['healthmonitor'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + @decorators.attr(type='nsxv') @decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a30') def test_create_verify_octavia_lb_with_vip_subnet_id_rr(self): @@ -664,6 +1013,40 @@ class OctaviaRoundRobin(feature_manager.FeatureManager): delay=self.hm_delay, default_pool=True) self.check_project_lbaas() + @decorators.attr(type='nsxv') + @decorators.idempotent_id('c5ac8546-6867-4b7b-8704-3843a11c1a58') + def test_octavia_check_certificate_at_backend(self): + """ + Create octavia loadbalancer with http traffic with barbican enabled. 
+ """ + diction = self.deploy_octavia_topology() + subnet_id = diction['subnet']['subnet']['id'] + if not CONF.nsxv3.ens: + self.start_web_servers(constants.HTTP_PORT) + barbican_secrets = self.create_barbican_secret_conatainer( + constants.CERT_FILE, constants.KEY_FILE) + barbican_container = barbican_secrets['secret_container'] + self.create_project_octavia(protocol_type="TERMINATED_HTTPS", + protocol_port="443", + lb_algorithm="ROUND_ROBIN", + vip_subnet_id=subnet_id, + hm_type='HTTP', + member_count=2, + weight=5, + pool_protocol='HTTP', + pool_port='80', + barbican_container=barbican_container, + count=0, barbican=True, + delay=self.hm_delay, + max_retries=self.hm_max_retries, + timeout=self.hm_timeout) + self.check_lbaas_project_weight_values(HTTPS=True) + cert_file = open(constants.CERT_FILE, "r") + cert_content = cert_file.read() + if CONF.network.backend == "nsxv": + self.vsm.check_cert_at_backend(lb_id=self.loadbalancer['id'], + cert_conent=cert_content.rstrip()) + @decorators.attr(type='nsxv') @decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a61') def test_create_verify_octavia_lb_with_vip_subnet_id(self): @@ -760,7 +1143,8 @@ class OctaviaRoundRobin(feature_manager.FeatureManager): lb_algorithm="ROUND_ROBIN", vip_subnet_id=subnet_id, l7policy=True, - action='REDIRECT_TO_URL') + action='REDIRECT_TO_URL', + redirect_url='http://www.vmware.com') @decorators.attr(type='nsxv') @decorators.idempotent_id('ca5c4368-6768-4a7b-8704-3844b11b1b66') @@ -792,7 +1176,7 @@ class OctaviaRoundRobin(feature_manager.FeatureManager): @decorators.attr(type='nsxv') @decorators.idempotent_id('ca5c4368-6769-4a7b-8704-3844b11b1b66') - def test_delete_lb_with_cascade(self): + def test_delete_lb_with_cascade_without_member(self): diction = self.deploy_octavia_topology() subnet_id = diction['subnet']['subnet']['id'] lb_name = data_utils.rand_name(self.namestart) @@ -809,33 +1193,208 @@ class OctaviaRoundRobin(feature_manager.FeatureManager): allowed_cidrs=None, name=lb_name)['listener'] self.octavia_admin_client.wait_for_load_balancer_status(lb_id) - self.listener2 = self.octavia_admin_listener_client.\ + self.pool = self.octavia_admin_pools_client.\ + create_octavia_pool(listener_id=self.listener['id'], + lb_algorithm='ROUND_ROBIN', + protocol='TCP', + name=lb_name, + session_persistence=None)['pool'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + pool_id = self.pool['id'] + self.healthmonitor = self.octavia_hm_client.\ + create_octavia_hm(pool_id=pool_id, type='PING', delay=2, + timeout=10, max_retries=5, + name=lb_name)['healthmonitor'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.octavia_admin_client.\ + delete_octavia_load_balancer_with_cascade(lb_id) + self.octavia_admin_client.\ + wait_for_load_balancer_status(lb_id, is_delete_op=True) + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=self.listener, + pool=self.pool, + hmonitor=self.healthmonitor, + cleanup=["listener", "pool", "hm"]) + lbs = self.octavia_admin_client.\ + list_octavia_load_balancers()['loadbalancers'] + lb_names = [lb['name'] for lb in lbs] + self.assertFalse(lb_name in lb_names) + + @decorators.attr(type='nsxv') + @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66') + def test_delete_lb_with_cascade_with_member(self): + diction = self.deploy_octavia_topology() + subnet_id = diction['subnet']['subnet']['id'] + self.start_web_servers('1212') + self.create_project_octavia(protocol_type="TCP", protocol_port="1212", + 
lb_algorithm="LEAST_CONNECTIONS", + hm_type='PING', vip_subnet_id=subnet_id, + default_pool=True, + timeout=self.hm_timeout, clean_up=False, + max_retries=self.hm_max_retries, + delay=self.hm_delay) + lb_id = self.loadbalancer['id'] + self.octavia_admin_client.\ + delete_octavia_load_balancer_with_cascade(lb_id) + self.octavia_admin_client.\ + wait_for_load_balancer_status(lb_id, is_delete_op=True) + lbs = self.octavia_admin_client.\ + list_octavia_load_balancers()['loadbalancers'] + lb_names = [lb['name'] for lb in lbs] + self.assertFalse(self.loadbalancer['name'] in lb_names) + + @decorators.attr(type='nsxv') + @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66') + def test_lb_crud_with_backend_verification(self): + diction = self.deploy_octavia_topology() + subnet_id = diction['subnet']['subnet']['id'] + self.start_web_servers('1212') + self.create_project_octavia(protocol_type="TCP", protocol_port="1212", + lb_algorithm="LEAST_CONNECTIONS", + hm_type='PING', vip_subnet_id=subnet_id, + default_pool=True, + timeout=self.hm_timeout, clean_up=False, + max_retries=self.hm_max_retries, + delay=self.hm_delay) + self.vip_ip_address = self.vip_ip_address + ':1212' + self.check_project_lbaas() + lb_id = self.loadbalancer['id'] + self.pool = self.pool['pool'] + self.healthmonitor = self.healthmonitor['healthmonitor'] + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=self.listener, + pool=self.pool, + hmonitor=self.healthmonitor, + member=self.members[0]['member'], + cleanup=[]) + # Update all components + self._update_lb_components(lb_id, self.healthmonitor, + self.members[0]['member'], self.pool, + self.listener) + # Delete & verify + self.octavia_hm_client.\ + delete_octavia_hm(self.healthmonitor['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.octavia_admin_pools_client.\ + delete_octavia_pool(self.pool['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.octavia_admin_listener_client.\ + delete_octavia_listener(self.listener['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + # verify health monitor, pool, listener got deleted from edge + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=self.listener, + pool=self.pool, + hmonitor=self.healthmonitor, + member=self.members[0]['member'], + cleanup=["ALL"]) + self.octavia_admin_client.delete_octavia_load_balancer(lb_id) + self.octavia_admin_client.\ + wait_for_load_balancer_status(lb_id, is_delete_op=True) + lbs = self.octavia_admin_client.\ + list_octavia_load_balancers()['loadbalancers'] + lb_names = [lb['name'] for lb in lbs] + self.assertFalse(self.loadbalancer['name'] in lb_names) + + @decorators.attr(type='nsxv') + @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66') + def test_deletion_pool_remove_hm_with_backend_verification(self): + diction = self.deploy_octavia_topology() + subnet_id = diction['subnet']['subnet']['id'] + self.create_project_octavia(protocol_type="TCP", protocol_port="1212", + lb_algorithm="ROUND_ROBIN", + hm_type='PING', vip_subnet_id=subnet_id, + default_pool=True, + timeout=self.hm_timeout, clean_up=False, + max_retries=self.hm_max_retries, + delay=self.hm_delay) + self.pool = self.pool['pool'] + self.healthmonitor = self.healthmonitor['healthmonitor'] + lb_id = self.loadbalancer['id'] + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=self.listener, + pool=self.pool, + hmonitor=self.healthmonitor, + 
member=self.members[0]['member'], + cleanup=[]) + # Delete pools member then pool & listener, + # should remove healthmonitor + for member in self.members: + self.octavia_admin_members_client.\ + delete_octavia_member(self.pool['id'], member['member']['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.octavia_admin_pools_client.\ + delete_octavia_pool(self.pool['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.octavia_admin_listener_client.\ + delete_octavia_listener(self.listener['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + # verify health monitor, pool, listener got deleted from edge + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=self.listener, + pool=self.pool, + hmonitor=self.healthmonitor, + member=self.members[0]['member'], + cleanup=["ALL"]) + self.octavia_admin_client.delete_octavia_load_balancer(lb_id) + self.octavia_admin_client.\ + wait_for_load_balancer_status(lb_id, is_delete_op=True) + lbs = self.octavia_admin_client.\ + list_octavia_load_balancers()['loadbalancers'] + lb_names = [lb['name'] for lb in lbs] + self.assertFalse(self.loadbalancer['name'] in lb_names) + + @decorators.attr(type='nsxv') + @decorators.idempotent_id('ca6c4368-6770-4a7b-8704-3844b11b1b61') + def test_vip_memebr_in_external_subnet(self): + diction = self.deploy_octavia_topology() + self.start_web_servers('1212') + ext_gw = diction['router']['router']['external_gateway_info'] + ext_subnet_id = ext_gw['external_fixed_ips'][0]['subnet_id'] + lb_name = data_utils.rand_name(self.namestart) + self.loadbalancer = self.octavia_admin_client.\ + create_octavia_load_balancer(name=lb_name, + vip_subnet_id=ext_subnet_id, + admin_state_up=True)['loadbalancer'] + lb_id = self.loadbalancer['id'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.listener = self.octavia_admin_listener_client.\ create_octavia_listener(loadbalancer_id=lb_id, protocol='TCP', protocol_port='1212', allowed_cidrs=None, - name='listener2')['listener'] - self.octavia_admin_client.wait_for_load_balancer_status(lb_id) - self.pool2 = self.octavia_admin_pools_client.\ - create_octavia_pool(listener_id=self.listener2['id'], - lb_algorithm='ROUND_ROBIN', - protocol='TCP', - name='pool2', - session_persistence=None) + name=lb_name)['listener'] self.octavia_admin_client.wait_for_load_balancer_status(lb_id) self.pool = self.octavia_admin_pools_client.\ create_octavia_pool(listener_id=self.listener['id'], lb_algorithm='ROUND_ROBIN', protocol='TCP', name=lb_name, - session_persistence=None) + session_persistence=None)['pool'] self.octavia_admin_client.wait_for_load_balancer_status(lb_id) - pool_id = self.pool['pool']['id'] + pool_id = self.pool['id'] + self.members = [] + for s in self.topology_servers.keys(): + fip_data = self.servers_details[s].floating_ips[0] + floating_ip_address = fip_data['floating_ip_address'] + member = self.octavia_admin_members_client.\ + create_octavia_member(pool_id, + subnet_id=ext_subnet_id, + address=floating_ip_address, + protocol_port='1212', + weight=1)['member'] + self.members.append(member) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) self.healthmonitor = self.octavia_hm_client.\ create_octavia_hm(pool_id=pool_id, type='PING', delay=2, timeout=10, max_retries=5, - name=lb_name) - self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + name=lb_name)['healthmonitor'] + self.check_project_lbaas() + if CONF.network.backend == "nsxv": + 
self._verify_lbaas_on_edge(lb_id, listener=self.listener,
+                                       pool=self.pool, member=self.members[0],
+                                       hmonitor=self.healthmonitor,
+                                       cleanup=[])
         self.octavia_admin_client.\
             delete_octavia_load_balancer_with_cascade(lb_id)
         self.octavia_admin_client.\
@@ -844,3 +1403,331 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
         self.assertFalse(lb_name in lb_names)
+
+    @decorators.attr(type='nsxv')
+    @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
+    def test_lb_crud_with_multi_listeners(self):
+        """
+        Create a lb with three listeners, each listener with one pool,
+        each pool with two members and a health monitor.
+        Verify traffic through each listener.
+        Update each component and verify the lb status.
+        Delete one listener and its respective pool, which should not
+        impact the other listeners, then delete the lb with the cascade
+        option, which should delete all components.
+        """
+        diction = self.deploy_octavia_topology(no_of_servers=6)
+        self.start_web_servers(constants.HTTP_PORT)
+        subnet_id = diction['subnet']['subnet']['id']
+        lb_name = data_utils.rand_name(self.namestart)
+        self.loadbalancer = self.octavia_admin_client.\
+            create_octavia_load_balancer(name=lb_name,
+                                         vip_subnet_id=subnet_id,
+                                         admin_state_up=True)['loadbalancer']
+        lb_id = self.loadbalancer['id']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self._create_multi_listener_topology(lb_id=lb_id, lb_name=lb_name)
+        # add two members to each pool
+        self.members = []
+        for s in self.topology_servers.keys():
+            fip_data = self.servers_details[s].floating_ips[0]
+            fixed_ip_address = fip_data['fixed_ip_address']
+            servers = list(self.topology_servers.keys())
+            if servers.index(s) <= 1:
+                member = self.octavia_admin_members_client.\
+                    create_octavia_member(self.pool_1['id'],
+                                          subnet_id=subnet_id,
+                                          address=fixed_ip_address,
+                                          protocol_port='80',
+                                          weight=1)['member']
+            elif servers.index(s) == 2 or servers.index(s) == 3:
+                member = self.octavia_admin_members_client.\
+                    create_octavia_member(self.pool_2['id'],
+                                          subnet_id=subnet_id,
+                                          address=fixed_ip_address,
+                                          protocol_port='80',
+                                          weight=2)['member']
+            else:
+                member = self.octavia_admin_members_client.\
+                    create_octavia_member(self.pool_3['id'],
+                                          subnet_id=subnet_id,
+                                          address=fixed_ip_address,
+                                          protocol_port='80',
+                                          weight=2)['member']
+            self.members.append(member)
+            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        if CONF.network.backend == "nsxv":
+            self._verify_lbaas_on_edge(lb_id, listener=self.listener_1,
+                                       pool=self.pool_1,
+                                       member=self.members[0],
+                                       hmonitor=self.healthmonitor_1,
+                                       cleanup=[])
+        if CONF.network.backend == "nsxv":
+            self._verify_lbaas_on_edge(lb_id, listener=self.listener_3,
+                                       pool=self.pool_3,
+                                       member=self.members[4],
+                                       hmonitor=self.healthmonitor_3,
+                                       cleanup=[])
+        # Assign floating ip to vip
+        self._assign_floating_ip_to_vip()
+        # verify listener_1 traffic
+        self.check_project_lbaas()
+        # verify listener_2 traffic
+        self.vip_ip_address = self.vip_ip_address + ':1212'
+        self.do_http_request(vip=self.vip_ip_address,
+                             send_counts=self.poke_counters)
+        self.assertTrue(len(self.http_cnt) == 2)
+        # Update all components
+        self._update_lb_components(lb_id, self.healthmonitor_1,
+                                   self.members[0], self.pool_1,
+                                   self.listener_1)
+        self.listener_1, self.pool_1, self.healthmonitor_1 =\
+            self.listener, self.pool, self.healthmonitor
+        # Delete third listener & pool
+        self.octavia_admin_members_client.\
+            delete_octavia_member(self.pool_3['id'],
self.members[4]['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.octavia_admin_members_client.\ + delete_octavia_member(self.pool_3['id'], self.members[5]['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.octavia_admin_pools_client.\ + delete_octavia_pool(self.pool_3['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.octavia_admin_listener_client.\ + delete_octavia_listener(self.listener_3['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + # verify health monitor, pool, listener got deleted from edge + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=self.listener_3, + pool=self.pool_3, + hmonitor=self.healthmonitor_3, + member=self.members[5], + cleanup=["ALL"]) + # verify listener_2 present + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=self.listener_1, + pool=self.pool_1, + member=self.members[1], + hmonitor=self.healthmonitor_1, + cleanup=[]) + self.octavia_admin_client.\ + delete_octavia_load_balancer_with_cascade(lb_id) + self.octavia_admin_client.\ + wait_for_load_balancer_status(lb_id, + is_delete_op=True) + lbs = self.octavia_admin_client.\ + list_octavia_load_balancers()['loadbalancers'] + lb_names = [lb['name'] for lb in lbs] + self.assertFalse(self.loadbalancer['name'] in lb_names) + + @decorators.attr(type='nsxv') + @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66') + def test_delete_member_when_vip_member_diff_subnet(self): + diction = self.deploy_octavia_topology_with_multi_network() + self.start_web_servers('1212') + subnet_id = diction['subnet1']['subnet']['id'] + subnet_id2 = diction['subnet2']['subnet']['id'] + lb_name = data_utils.rand_name(self.namestart) + self.loadbalancer = self.octavia_admin_client.\ + create_octavia_load_balancer(name=lb_name, + vip_subnet_id=subnet_id, + admin_state_up=True)['loadbalancer'] + lb_id = self.loadbalancer['id'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.listener = self.octavia_admin_listener_client.\ + create_octavia_listener(loadbalancer_id=lb_id, + protocol='TCP', + protocol_port='1212', + allowed_cidrs=None, + name=lb_name)['listener'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.pool = self.octavia_admin_pools_client.\ + create_octavia_pool(listener_id=self.listener['id'], + lb_algorithm='ROUND_ROBIN', + protocol='TCP', + name=lb_name, + session_persistence=None)['pool'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + pool_id = self.pool['id'] + self.members = [] + for s in self.topology_servers.keys(): + fip_data = self.servers_details[s].floating_ips[0] + fixed_ip_address = fip_data['fixed_ip_address'] + servers = list(self.topology_servers.keys()) + if servers.index(s) <= 1: + member = self.octavia_admin_members_client.\ + create_octavia_member(pool_id, + subnet_id=subnet_id, + address=fixed_ip_address, + protocol_port='1212', + weight=1)['member'] + else: + member = self.octavia_admin_members_client.\ + create_octavia_member(pool_id, + subnet_id=subnet_id2, + address=fixed_ip_address, + protocol_port='1212', + weight=1)['member'] + self.members.append(member) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + self.healthmonitor = self.octavia_hm_client.\ + create_octavia_hm(pool_id=pool_id, type='PING', delay=2, + timeout=10, max_retries=5, + name=lb_name)['healthmonitor'] + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + if 
CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=self.listener, + pool=self.pool, + member=self.members[0], + hmonitor=self.healthmonitor, + cleanup=[]) + # Delete one member of same subnet as vip + self.octavia_admin_members_client.\ + delete_octavia_member(pool_id, self.members[0]['id']) + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=self.listener, + pool=self.pool, + member=self.members[0], + hmonitor=self.healthmonitor, + cleanup=['member']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + # Delete one member of diff subnet as vip + self.octavia_admin_members_client.\ + delete_octavia_member(pool_id, self.members[3]['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=self.listener, + pool=self.pool, + member=self.members[3], + hmonitor=self.healthmonitor, + cleanup=['member']) + # Delete pool, listenr + self.octavia_admin_pools_client.\ + delete_octavia_pool(pool_id) + self.octavia_admin_listener_client.\ + delete_octavia_listener(self.listener['id']) + self.octavia_admin_client.wait_for_load_balancer_status(lb_id) + # verify health monitor, pool, listener got deleted from edge + if CONF.network.backend == "nsxv": + self._verify_lbaas_on_edge(lb_id, listener=self.listener, + pool=self.pool, member=self.members[0], + hmonitor=self.healthmonitor, + cleanup=['ALL']) + self.octavia_admin_client.delete_octavia_load_balancer(lb_id) + + @decorators.attr(type='nsxv') + @decorators.idempotent_id('60e9ecaf-b8d7-48a9-b0d2-942e5bb38f63') + def test_update_to_None_verify_octavia_session_persistence(self): + """ + To verify the server count for LB pool with SOURCE_IP + session persistence and ROUND_ROBIN lb-algorithm, + expected outcome is only one server responds to the + client requests. 
+        Set session persistence to None, verify the backend, and check
+        that traffic is now spread across two servers.
+        """
+        diction = self.deploy_octavia_topology()
+        self.start_web_servers(constants.HTTP_PORT)
+        net_id = diction['network']['id']
+        self.create_project_octavia(protocol_type="HTTP", protocol_port="80",
+                                    lb_algorithm="ROUND_ROBIN",
+                                    vip_net_id=net_id, persistence=True,
+                                    persistence_type="SOURCE_IP",
+                                    clean_up=False)
+        self.check_lbaas_project_weight_values(constants.NO_OF_VMS_2,
+                                               hash_persistence=True)
+        # verify listener, pool, member and session persistence on the edge
+        self.pool = self.pool['pool']
+        lb_id, lb_name = self.loadbalancer['id'], self.loadbalancer['name']
+        if CONF.network.backend == "nsxv":
+            self._verify_lbaas_on_edge(lb_id, listener=self.listener,
+                                       pool=self.pool,
+                                       member=self.members[0]['member'],
+                                       session_persistence="SOURCE_IP",
+                                       cleanup=[])
+        # Update pool to change name & disable session persistence
+        pool_data = {'name': 'newPool', 'lb_algorithm': 'LEAST_CONNECTIONS',
+                     'session_persistence': None}
+        self.pool = self.octavia_admin_pools_client.\
+            update_octavia_pool(self.pool['id'], pool_data)['pool']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        if CONF.network.backend == "nsxv":
+            self._verify_lbaas_on_edge(lb_id, listener=self.listener,
+                                       pool=self.pool,
+                                       member=self.members[0]['member'],
+                                       session_persistence="None",
+                                       cleanup=[])
+        # Update pool to enable session persistence
+        pool_data = {'name': 'newPool', 'lb_algorithm': 'LEAST_CONNECTIONS',
+                     "session_persistence": {"type": "SOURCE_IP"}}
+        self.pool = self.octavia_admin_pools_client.\
+            update_octavia_pool(self.pool['id'], pool_data)['pool']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        if CONF.network.backend == "nsxv":
+            self._verify_lbaas_on_edge(lb_id, listener=self.listener,
+                                       pool=self.pool,
+                                       member=self.members[0]['member'],
+                                       session_persistence="SOURCE_IP",
+                                       cleanup=[])
+        self.octavia_admin_pools_client.\
+            delete_octavia_pool(self.pool['id'])
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.octavia_admin_listener_client.\
+            delete_octavia_listener(self.listener['id'])
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.octavia_admin_client.delete_octavia_load_balancer(lb_id)
+        self.octavia_admin_client.\
+            wait_for_load_balancer_status(lb_id, is_delete_op=True)
+        lbs = self.octavia_admin_client.\
+            list_octavia_load_balancers()['loadbalancers']
+        lb_names = [lb['name'] for lb in lbs]
+        self.assertFalse(lb_name in lb_names)
+
+    @decorators.attr(type='nsxv')
+    @decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a34')
+    def test_verify_rbac_network_octavia_lb_admin(self):
+        """
+        Share the tenant network with the admin project via an RBAC
+        policy and verify that an Octavia load balancer can be created
+        on a port of that shared network.
+        """
+        diction = self.deploy_octavia_topology()
+        self.start_web_servers(constants.HTTP_PORT)
+        net_id = diction['network']['id']
+        port_id = self.cmgr_adm.ports_client.create_port(
+            network_id=net_id)['port']['id']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.cmgr_adm.ports_client.delete_port, port_id)
+        self.rbac_client.create_rbac_policy(action="access_as_shared",
+                                            object_type="network",
+                                            object_id=net_id,
+                                            target_tenant="admin")
+        self.create_project_octavia(protocol_type="HTTPS", protocol_port="80",
+                                    lb_algorithm="ROUND_ROBIN",
+                                    vip_port_id=port_id, hm_type='PING',
+                                    timeout=self.hm_timeout,
+                                    max_retries=self.hm_max_retries,
+                                    delay=self.hm_delay, default_pool=True)
+
+    @decorators.attr(type='nsxv')
+    @decorators.idempotent_id('d5ac8546-6867-4b7a-8544-3843a11b1a34')
+    def test_verify_octavia_lb_resource_status(self):
+        """
+        Fetch the status of the load balancer resources using the
+        'openstack loadbalancer status show' API.
+        """
+        diction = self.deploy_octavia_topology()
+        self.start_web_servers(constants.HTTP_PORT)
+        net_id = diction['network']['id']
+        port_id = self.cmgr_adm.ports_client.create_port(
+            network_id=net_id)['port']['id']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.cmgr_adm.ports_client.delete_port, port_id)
+        self.create_project_octavia(protocol_type="HTTPS", protocol_port="80",
+                                    lb_algorithm="ROUND_ROBIN",
+                                    vip_port_id=port_id, hm_type='PING',
+                                    timeout=self.hm_timeout,
+                                    max_retries=self.hm_max_retries,
+                                    delay=self.hm_delay, default_pool=True)
+        lb_id = self.loadbalancer['id']
+        noerr, status_dict = self.get_status_lb_resources(lb_id)
+        self.assertTrue(noerr, status_dict)
diff --git a/vmware_nsx_tempest_plugin/tests/scenario/test_new_case_coverage.py b/vmware_nsx_tempest_plugin/tests/scenario/test_new_case_coverage.py
index e6832bc..d26a253 100644
--- a/vmware_nsx_tempest_plugin/tests/scenario/test_new_case_coverage.py
+++ b/vmware_nsx_tempest_plugin/tests/scenario/test_new_case_coverage.py
@@ -1950,3 +1950,9 @@ class TestNewCase(feature_manager.FeatureManager):
         port_id = self.cmgr_adm.ports_client.create_port(**args)['port']['id']
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.cmgr_adm.ports_client.delete_port, port_id)
+
+    @decorators.attr(type='nsxv')
+    @decorators.idempotent_id('2226016b-91cc-8905-b217-22344cab24a2')
+    def test_verify_mdproxy_member_status(self):
+        """Verify that every pool member on the metadata-proxy edge is UP."""
+        self.assertTrue(self.vsm.verify_member_status_of_md_proxy_edges())
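
For clarity, below is a minimal standalone sketch of the member-status check that VSMClient.verify_member_status_of_md_proxy_edges() performs above. It assumes the statistics payload shape returned by GET /edges/<edge-id>/loadbalancer/statistics on the NSX-v 4.0 API ('pool' -> 'member' -> 'status'); the function name all_pool_members_up and the sample payload are illustrative only and are not part of the plugin API.

# Sketch: the metadata-proxy edge is considered healthy only when its first
# pool exists and every member of that pool reports status 'UP'.
from typing import Any, Dict


def all_pool_members_up(statistics: Dict[str, Any]) -> bool:
    """Return True only if the first pool has members and all are UP."""
    pools = statistics.get('pool') or []
    if not pools:
        return False  # no LB pool configured on the metadata-proxy edge
    members = pools[0].get('member') or []
    if not members:
        return False  # pool present but empty, nothing is serving metadata
    return all(m.get('status') == 'UP' for m in members)


# Example with a trimmed-down, hypothetical statistics payload:
sample_stats = {'pool': [{'member': [{'status': 'UP'}, {'status': 'UP'}]}]}
assert all_pool_members_up(sample_stats)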