Octavia API cases & other Octavia cases automation
Change-Id: I6e31b7b2179c656c30a46a6f9edf8e73a8596a30
parent 83b8ebc983
commit abf6fc9460
@@ -175,7 +175,12 @@ class FeatureManager(traffic_manager.IperfManager,
                                net_client.region,
                                net_client.endpoint_type,
                                **_params)
+        cls.octavia_admin_quota_client = openstack_network_clients.\
+            OctaviaQuotaClient(net_client.auth_provider,
+                               net_client.service,
+                               net_client.region,
+                               net_client.endpoint_type,
+                               **_params)
         net_client.service = 'dns'
         cls.zones_v2_client = openstack_network_clients.ZonesV2Client(
             net_client.auth_provider,
@@ -825,7 +830,7 @@ class FeatureManager(traffic_manager.IperfManager,
             else:
                 break
         else:
-            self.assertFalse('server_lbaas_0' in self.http_cnt, True)
+            self.assertNotIn('server_lbaas_0', self.http_cnt)

     def count_response(self, response):
         response = response.decode('utf-8')
@@ -146,6 +146,12 @@ class VSMClient(object):
         endpoint = "/edges/%s/clisettings" % (edge_detail['id'])
         self.__set_endpoint(endpoint)
         self.put(body=payload)
+        rules = [{'name': 'anyRule', "ruleType": "user", "enabled": 'true',
+                  "action": "accept"}]
+        rule_payload = {"firewallRules": rules}
+        endpoint = "/edges/%s/firewall/config/rules" % (edge_detail['id'])
+        self.__set_endpoint(endpoint)
+        self.post(body=rule_payload)

     def get_all_vdn_scopes(self):
         """Retrieve existing network scopes"""
@@ -487,9 +493,9 @@ class VSMClient(object):
         if hmonitor:
             hms_vsm = [hm['id'] for hm in lbaas_config['monitor']]
             if 'hm' in cleanup:
-                self.assertFalse(hmonitor['id'] in hms_vsm)
+                self.assertNotIn(hmonitor['id'], hms_vsm)
             else:
-                self.assertTrue(hmonitor['id'] in hms_vsm)
+                self.assertIn(hmonitor['id'], hms_vsm)
         if pool:
             pool_vsm = \
                 [(p['name'], p['algorithm']) for p in lbaas_config['pool']]
@@ -630,6 +630,30 @@ class ContainerClient(rest_client.RestClient):
         return body


+class OctaviaQuotaClient(base.BaseNetworkClient):
+    """
+    The Client takes care of
+    listing quota,
+    set/unset quota,
+    listing default quota
+    """
+    def list_project_quota(self, project_id):
+        uri = '/lbaas/quotas/' + project_id
+        return self.list_resources(uri)
+
+    def set_project_quota(self, project_id, **kwargs):
+        uri = '/lbaas/quotas/' + project_id
+        return self.update_resource(uri, kwargs, expect_response_code=202)
+
+    def delete_project_quota(self, project_id):
+        uri = '/lbaas/quotas/' + project_id
+        return self.delete_resource(uri, expect_response_code=202)
+
+    def list_default_quota(self):
+        uri = '/lbaas/quotas/defaults'
+        return self.list_resources(uri)
+
+
 class OctaviaLB_Client(base.BaseNetworkClient):
     """
     The Client takes care of
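For orientation, a minimal sketch of how the new quota client can be driven once FeatureManager.setup_clients() has registered it on the test class; the idempotent id and the one-step quota bump below are illustrative only:

    @decorators.idempotent_id('00000000-0000-0000-0000-000000000000')
    def test_quota_roundtrip_sketch(self):
        # Hypothetical test body; it relies only on the client methods added above.
        project_id = CONF.auth.admin_tenant_id
        client = self.octavia_admin_quota_client
        current = client.list_project_quota(project_id)['quota']
        # The request body nests under 'quota'; note the request key is
        # 'loadbalancer' while the response key is 'load_balancer'.
        client.set_project_quota(
            project_id, quota={'loadbalancer': current['load_balancer'] + 1})
        # Deleting the project quota resets every value to the default (-1).
        client.delete_project_quota(project_id)
        self.assertEqual(
            -1, client.list_project_quota(project_id)['quota']['load_balancer'])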
vmware_nsx_tempest_plugin/tests/nsxv/api/test_lb_quotas.py (new file, 100 lines)
@@ -0,0 +1,100 @@
+# Copyright 2019 VMware Inc
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from tempest.lib import decorators
+from tempest import test
+
+from vmware_nsx_tempest_plugin.common import constants
+from vmware_nsx_tempest_plugin.lib import feature_manager
+
+LOG = constants.log.getLogger(__name__)
+CONF = config.CONF
+
+
+class OctaviaQuota(feature_manager.FeatureManager):
+
+    """Test class for the Octavia LBaaS quota API.
+
+    It covers listing the default quotas, updating a project's quotas,
+    and deleting them so that they fall back to the defaults.
+    """
+    @classmethod
+    def setup_clients(cls):
+        super(OctaviaQuota, cls).setup_clients()
+        cls.cmgr_adm = cls.get_client_manager('admin')
+        cls.cmgr_alt = cls.get_client_manager('alt')
+
+    @classmethod
+    def skip_checks(cls):
+        super(OctaviaQuota, cls).skip_checks()
+        cfg = CONF.network
+        if not test.is_extension_enabled('lbaasv2', 'network'):
+            msg = 'lbaasv2 extension is not enabled.'
+            raise cls.skipException(msg)
+        if not (cfg.project_networks_reachable or cfg.public_network_id):
+            msg = ('Either project_networks_reachable must be "true", or '
+                   'public_network_id must be defined.')
+            raise cls.skipException(msg)
+
+    @classmethod
+    def setup_credentials(cls):
+        # Ask framework to not create network resources for these tests.
+        cls.set_network_resources()
+        super(OctaviaQuota, cls).setup_credentials()
+
+    @decorators.idempotent_id('c3ac8546-6867-4b7a-8544-3843a11b1a34')
+    def test_list_default_quota(self):
+        quotas = self.octavia_admin_quota_client.\
+            list_default_quota()['quota']
+        for quota in quotas:
+            msg = quota + '\'s default quota is not -1'
+            self.assertTrue(quotas[quota] == -1, msg)
+
+    @decorators.idempotent_id('c4ac8546-6867-4b7a-8544-3843a11b1a34')
+    def test_show_quota_and_set_quota_and_show_quota_to_verify(self):
+        project_id = CONF.auth.admin_tenant_id
+        quotas = self.octavia_admin_quota_client.\
+            list_project_quota(project_id)['quota']
+        # Update lb quota for project with increasing quota by 5
+        kwargs = {}
+        kwargs['quota'] = {
+            "loadbalancer": quotas['load_balancer'] + 5,
+            "listener": quotas['listener'] + 5,
+            "member": quotas['member'] + 5,
+            "pool": quotas['pool'] + 5,
+            "healthmonitor": quotas['health_monitor'] + 5,
+        }
+        self.octavia_admin_quota_client.\
+            set_project_quota(project_id, **kwargs)['quota']
+        updated_quota = self.octavia_admin_quota_client.\
+            list_project_quota(project_id)['quota']
+        self.assertTrue(updated_quota['load_balancer'] ==
+                        (quotas['load_balancer'] + 5))
+        self.assertTrue(updated_quota['listener'] ==
+                        (quotas['listener'] + 5))
+
+    @decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a34')
+    def test_delete_quota_and_show_quota_to_verify(self):
+        project_id = CONF.auth.admin_tenant_id
+        self.octavia_admin_quota_client.\
+            delete_project_quota(project_id)
+        updated_quota = self.octavia_admin_quota_client.\
+            list_project_quota(project_id)['quota']
+        for quota in updated_quota:
+            msg = quota + '\'s quota is not reset to -1'
+            self.assertTrue(updated_quota[quota] == -1, msg)
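For reference, the payload shape the quota assertions above assume for list_default_quota() and for a freshly reset project; the keys mirror the response fields used in the set/show test, and -1 is the "unlimited" default the tests check for:

    # Assumed return value of list_default_quota() / list_project_quota()
    # right after delete_project_quota(); keys and the -1 defaults are taken
    # from the assertions in the tests above.
    default_quota = {
        'quota': {
            'load_balancer': -1,
            'listener': -1,
            'pool': -1,
            'member': -1,
            'health_monitor': -1,
        }
    }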
@@ -272,7 +272,8 @@ class ProviderSecGroup(base.BaseAdminNetworkTest):
         show_sec_group = sg_client.show_security_group(sg_id)
         rules_list = show_sec_group['security_group']['security_group_rules']
         rules_id_list = [rule['id'] for rule in rules_list]
-        self.assertTrue(sg_rule1_id in rules_id_list)
+        sg_id_present = sg_rule1_id in rules_id_list
+        self.assertTrue(sg_id_present)

     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('edd94f8c-53b7-4286-9350-0ddc0af3213b')
@@ -414,7 +414,8 @@ class LBaasRoundRobinBaseTest(dmgr.TopoDeployScenarioManager):
                                                 'Admin!23Admin')
         cmd = 'show flowtable topN 20 '
         output = ssh_client.exec_command(cmd)
-        self.assertTrue(vip in output)
+        vip_in_output = vip in output
+        self.assertTrue(vip_in_output)


 class TestLBaasRoundRobinOps(LBaasRoundRobinBaseTest):
@@ -14,6 +14,7 @@
 # under the License.
 import re

+from tempest.common.utils.linux import remote_client
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
@@ -102,6 +103,29 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         LOG.debug("tearDown lbaas exiting...")
         super(OctaviaRoundRobin, self).tearDown()

+    def start_netcat_session(self, client_ip, server_ip, protocol_port=1212):
+        private_key = self.keypair['private_key']
+        ssh_client1 = self.get_remote_client(server_ip,
+                                             private_key=private_key)
+        vip = self.loadbalancer['vip_address']
+        cmd = ('nc -l -p %s &' % (protocol_port))
+        ssh_client1.exec_command(cmd)
+        ssh_client2 = self.get_remote_client(client_ip,
+                                             private_key=private_key)
+        cmd = ('nc %s %s &' % (vip, protocol_port))
+        ssh_client2.exec_command(cmd)
+
+    def verify_session_edge(self, vip, router):
+        router_info = \
+            router['external_gateway_info']['external_fixed_ips']
+        router_ip = \
+            router_info[0]['ip_address']
+        ssh_client = remote_client.RemoteClient(router_ip, 'admin',
+                                                'Admin!23Admin')
+        cmd = 'show flowtable topN 20 '
+        output = ssh_client.exec_command(cmd)
+        self.assertIn(vip, output)

     def _assign_floating_ip_to_vip(self):
         vip_port = self.loadbalancer['vip_port_id']
         sg_id = self.sg['id']
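Taken together, the two new helpers emulate a client-to-VIP TCP flow and then confirm it on the NSX edge. A rough sketch of how they are chained inside a test method (mirroring the new scenario test further below; client_fip and server_fip stand in for the floating IPs of the client and member VMs):

    # Hypothetical call sequence inside an OctaviaRoundRobin test method
    self.start_netcat_session(client_fip, server_fip, protocol_port=1212)
    vip = self.loadbalancer['vip_address']
    self.vsm.enable_ssh_on_edge(router['name'], router['id'])
    # The VIP is expected to show up in the edge's 'show flowtable' output.
    self.verify_session_edge(vip, router)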
@@ -417,17 +441,20 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
                         self.cmgr_adm.routers_client.remove_router_interface,
                         router_lbaas['router']['id'],
                         subnet_id=subnet_lbaas2['subnet']['id'])
+        self.keypair = self.create_keypair(self.cmgr_adm.keypairs_client)
         for instance in range(0, no_of_servers):
             self.create_topology_instance(
                 "server_lbaas1_%s" % instance, [network_lbaas_1],
                 security_groups=[{'name': self.sg['name']}],
-                image_id=image_id, clients=self.cmgr_adm)
+                image_id=image_id, clients=self.cmgr_adm,
+                keypair=self.keypair)
         self.topology_servers1 = self.topology_servers
         for instance in range(0, no_of_servers):
             self.create_topology_instance(
                 "server_lbaas2_%s" % instance, [network_lbaas_2],
                 security_groups=[{'name': self.sg['name']}],
-                image_id=image_id, clients=self.cmgr_adm)
+                image_id=image_id, clients=self.cmgr_adm,
+                keypair=self.keypair)
         self.topology_servers2 = self.topology_servers
         return dict(router=router_lbaas, subnet1=subnet_lbaas1,
                     subnet2=subnet_lbaas2, network1=network_lbaas_1,
@@ -1218,7 +1245,7 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(lb_name in lb_names)
+        self.assertNotIn(lb_name, lb_names)

     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
@@ -1241,7 +1268,39 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(self.loadbalancer['name'] in lb_names)
+        self.assertNotIn(self.loadbalancer['name'], lb_names)
+
+    @decorators.attr(type='nsxv')
+    @decorators.idempotent_id('ca5d4368-6770-4a7b-8704-3845b11b1b66')
+    def test_delete_lb_with_cascade_when_pool_without_attaching_listener(self):
+        diction = self.deploy_octavia_topology()
+        subnet_id = diction['subnet']['subnet']['id']
+        self.create_project_octavia(protocol_type="TCP", protocol_port="1212",
+                                    lb_algorithm="LEAST_CONNECTIONS",
+                                    hm_type='PING', vip_subnet_id=subnet_id,
+                                    default_pool=True,
+                                    timeout=self.hm_timeout, clean_up=False,
+                                    max_retries=self.hm_max_retries,
+                                    delay=self.hm_delay)
+        lb_id = self.loadbalancer['id']
+        self.octavia_admin_pools_client.\
+            delete_octavia_pool(self.pool['pool']['id'])
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.pool = self.octavia_admin_pools_client.\
+            create_octavia_pool(loadbalancer_id=lb_id,
+                                lb_algorithm='ROUND_ROBIN',
+                                protocol='TCP',
+                                name='NewPool',
+                                session_persistence=None)['pool']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.octavia_admin_client.\
+            delete_octavia_load_balancer_with_cascade(lb_id)
+        self.octavia_admin_client.\
+            wait_for_load_balancer_status(lb_id, is_delete_op=True)
+        lbs = self.octavia_admin_client.\
+            list_octavia_load_balancers()['loadbalancers']
+        lb_names = [lb['name'] for lb in lbs]
+        self.assertNotIn(self.loadbalancer['name'], lb_names)

     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
@@ -1294,7 +1353,7 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(self.loadbalancer['name'] in lb_names)
+        self.assertNotIn(self.loadbalancer['name'], lb_names)

     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
@@ -1342,7 +1401,7 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(self.loadbalancer['name'] in lb_names)
+        self.assertNotIn(self.loadbalancer['name'], lb_names)

     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('ca6c4368-6770-4a7b-8704-3844b11b1b61')
@@ -1402,7 +1461,7 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(lb_name in lb_names)
+        self.assertNotIn(lb_name, lb_names)

     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
@@ -1518,7 +1577,84 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(self.loadbalancer['name'] in lb_names)
+        self.assertNotIn(self.loadbalancer['name'], lb_names)
+
+    @decorators.attr(type='nsxv')
+    @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11c1b66')
+    def test_delete_second_lb_verify_tcp_connection_with_first_lb(self):
+        diction = \
+            self.deploy_octavia_topology_with_multi_network(no_of_servers=1)
+        subnet_id = diction['subnet1']['subnet']['id']
+        router = diction['router']['router']
+        # Create first lb
+        lb_name = data_utils.rand_name(self.namestart)
+        self.loadbalancer = self.octavia_admin_client.\
+            create_octavia_load_balancer(name=lb_name,
+                                         vip_subnet_id=subnet_id,
+                                         admin_state_up=True)['loadbalancer']
+        lb_id = self.loadbalancer['id']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.listener = self.octavia_admin_listener_client.\
+            create_octavia_listener(loadbalancer_id=lb_id,
+                                    protocol='TCP',
+                                    protocol_port='1212',
+                                    allowed_cidrs=None,
+                                    name=lb_name)['listener']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.pool = self.octavia_admin_pools_client.\
+            create_octavia_pool(listener_id=self.listener['id'],
+                                lb_algorithm='ROUND_ROBIN',
+                                protocol='TCP',
+                                name=lb_name,
+                                session_persistence=None)['pool']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        pool_id = self.pool['id']
+        for s in self.topology_servers.keys():
+            fip_data = self.servers_details[s].floating_ips[0]
+            fixed_ip_address = fip_data['fixed_ip_address']
+            servers = list(self.topology_servers.keys())
+            # Adding one VM as member
+            if servers.index(s) == 0:
+                self.octavia_admin_members_client.\
+                    create_octavia_member(pool_id,
+                                          subnet_id=subnet_id,
+                                          address=fixed_ip_address,
+                                          protocol_port='1212',
+                                          weight=1)['member']
+                server1_fip = fip_data['floating_ip_address']
+            else:
+                client1_fip = fip_data['floating_ip_address']
+            self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.healthmonitor = self.octavia_hm_client.\
+            create_octavia_hm(pool_id=pool_id, type='PING', delay=2,
+                              timeout=10, max_retries=5,
+                              name=lb_name)['healthmonitor']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        # LB creation is done, start netcat session and verify on edge
+        self.start_netcat_session(client1_fip, server1_fip,
+                                  protocol_port=1212)
+        vip = self.loadbalancer['vip_address']
+        self.vsm.enable_ssh_on_edge(router['name'], router['id'])
+        self.verify_session_edge(vip, router)
+        lb2_name = data_utils.rand_name(self.namestart)
+        self.loadbalancer_2 = self.octavia_admin_client.\
+            create_octavia_load_balancer(name=lb2_name,
+                                         vip_subnet_id=subnet_id,
+                                         admin_state_up=True)['loadbalancer']
+        lb2_id = self.loadbalancer_2['id']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb2_id)
+        self.octavia_admin_client.\
+            delete_octavia_load_balancer_with_cascade(lb2_id)
+        self.verify_session_edge(vip, router)
+        self.octavia_admin_client.\
+            delete_octavia_load_balancer_with_cascade(lb_id)
+        self.octavia_admin_client.\
+            wait_for_load_balancer_status(lb_id,
+                                          is_delete_op=True)
+        lbs = self.octavia_admin_client.\
+            list_octavia_load_balancers()['loadbalancers']
+        lb_names = [lb['name'] for lb in lbs]
+        self.assertNotIn(self.loadbalancer['name'], lb_names)

     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('ca5c4368-6770-4a7b-8704-3844b11b1b66')
@@ -1681,7 +1817,7 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         lbs = self.octavia_admin_client.\
             list_octavia_load_balancers()['loadbalancers']
         lb_names = [lb['name'] for lb in lbs]
-        self.assertFalse(lb_name in lb_names)
+        self.assertNotIn(lb_name, lb_names)

     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a34')
@@ -72,7 +72,7 @@ class NSXv3NativeDHCPNegative(base.BaseNetworkTest):
         nsx_network = self.nsxp.get_logical_switch(network['name'],
                                                     network['id'])
         self.assertIsNotNone(nsx_network)
-        self.assertEqual('subnets' in nsx_network, False)
+        self.assertNotIn('subnets', nsx_network)

     @decorators.attr(type='nsxv3')
     @decorators.attr(type=['negative'])
@@ -96,7 +96,7 @@ class NSXv3NativeDHCPNegative(base.BaseNetworkTest):
         nsx_network = self.nsxp.get_logical_switch(network['name'],
                                                     network['id'])
         self.assertIsNotNone(nsx_network)
-        self.assertEqual('subnets' in nsx_network, False)
+        self.assertNotIn('subnets', nsx_network)

     @decorators.attr(type='nsxv3')
     @decorators.attr(type=['negative'])
@@ -120,7 +120,7 @@ class NSXv3NativeDHCPNegative(base.BaseNetworkTest):
         nsx_network = self.nsxp.get_logical_switch(network['name'],
                                                     network['id'])
         self.assertIsNotNone(nsx_network)
-        self.assertEqual('subnets' in nsx_network, True)
+        self.assertIn('subnets', nsx_network)
         # Update subnet to disable DHCP
         self.subnets_client.update_subnet(subnet['id'], enable_dhcp=False)
         if CONF.network.backend != 'nsxp':
@@ -104,7 +104,7 @@ class NSXv3MacLearningTest(base.BaseNetworkTest):
         body = self.ports_client.list_ports()
         ports_list = body['ports']
         if len(ports_list) > 0:
-            self.assertFalse(port_id in [n['id'] for n in ports_list],
+            self.assertNotIn(port_id, [n['id'] for n in ports_list],
                              "Deleted port still present in ports list")

     def _conv_switch_prof_to_dict(self, switch_profiles):
@@ -301,12 +301,14 @@ class TestRouterNoNATOps(manager.NetworkScenarioTest):
         else:
             self.assertTrue(len(nat_rules) == 3)
         if CONF.network.backend == 'nsxp':
+            route_adv_type_nat = 'TIER1_NAT' in nsx_router_policy[
+                'route_advertisement_types']
             self.assertTrue(
-                'TIER1_NAT' in nsx_router_policy['route_advertisement_types'],
+                route_adv_type_nat,
                 nat_msg)
-            self.assertFalse(
-                'TIER1_CONNECTED' in nsx_router_policy[
-                    'route_advertisement_types'], adv_msg)
+            route_adv_type_conn = 'TIER1_CONNECTED' in nsx_router_policy[
+                'route_advertisement_types']
+            self.assertFalse(route_adv_type_conn, adv_msg)
         else:
             self.assertTrue(router_adv['advertise_nat_routes'], nat_msg)
             self.assertFalse(
@@ -349,12 +351,14 @@ class TestRouterNoNATOps(manager.NetworkScenarioTest):
         else:
             self.assertTrue(len(nat_rules) == 0)
         if CONF.network.backend == 'nsxp':
-            self.assertFalse(
-                'TIER1_NAT' in nsx_router_policy[
-                    'route_advertisement_types'], nat_msg)
+            route_adv_type_nat = 'TIER1_NAT' in nsx_router_policy[
+                'route_advertisement_types']
             self.assertTrue(
-                'TIER1_CONNECTED' in nsx_router_policy[
-                    'route_advertisement_types'], adv_msg)
+                route_adv_type_nat,
+                nat_msg)
+            route_adv_type_conn = 'TIER1_CONNECTED' in nsx_router_policy[
+                'route_advertisement_types']
+            self.assertFalse(route_adv_type_conn, adv_msg)
         else:
             self.assertFalse(router_adv['advertise_nat_routes'], nat_msg)
             self.assertTrue(
@@ -390,12 +394,14 @@ class TestRouterNoNATOps(manager.NetworkScenarioTest):
         else:
             self.assertTrue(len(nat_rules) == 0)
         if CONF.network.backend == 'nsxp':
-            self.assertFalse(
-                'TIER1_NAT' in nsx_router_policy[
-                    'route_advertisement_types'], nat_msg)
+            route_adv_type_nat = 'TIER1_NAT' in nsx_router_policy[
+                'route_advertisement_types']
             self.assertTrue(
-                'TIER1_CONNECTED' in nsx_router_policy[
-                    'route_advertisement_types'], adv_msg)
+                route_adv_type_nat,
+                nat_msg)
+            route_adv_type_conn = 'TIER1_CONNECTED' in nsx_router_policy[
+                'route_advertisement_types']
+            self.assertFalse(route_adv_type_conn, adv_msg)
         else:
             self.assertFalse(router_adv['advertise_nat_routes'], nat_msg)
             self.assertTrue(
@@ -432,12 +438,14 @@ class TestRouterNoNATOps(manager.NetworkScenarioTest):
         else:
             self.assertTrue(len(nat_rules) == 3)
         if CONF.network.backend == 'nsxp':
+            route_adv_type_nat = 'TIER1_NAT' in nsx_router_policy[
+                'route_advertisement_types']
             self.assertTrue(
-                'TIER1_NAT' in nsx_router_policy[
-                    'route_advertisement_types'], nat_msg)
-            self.assertFalse(
-                'TIER1_CONNECTED' in nsx_router_policy[
-                    'route_advertisement_types'], adv_msg)
+                route_adv_type_nat,
+                nat_msg)
+            route_adv_type_conn = 'TIER1_CONNECTED' in nsx_router_policy[
+                'route_advertisement_types']
+            self.assertFalse(route_adv_type_conn, adv_msg)
         else:
             self.assertTrue(router_adv['advertise_nat_routes'], nat_msg)
             self.assertFalse(
@@ -89,7 +89,8 @@ class MultipleAllowAddress(feature_manager.FeatureManager):

         ipList = [ip_mac['ip_address'] for ip_mac in port_details]
         msg = ipaddressORcidr + 'is not in allowed address'
-        self.assertTrue(ipaddressORcidr in ipList, msg)
+        ip_or_cidr_in_list = ipaddressORcidr in ipList
+        self.assertTrue(ip_or_cidr_in_list, msg)

     @decorators.attr(type='negative')
     @decorators.attr(type='nsxv')
|
Loading…
x
Reference in New Issue
Block a user