Octavia RBAC and pending tests.

Change-Id: Id4eb10825c95070dd06c8e3c70d5f3835301e249
parent 45c8419c80
commit 4dc52cb9bd
@@ -126,6 +126,12 @@ class FeatureManager(traffic_manager.IperfManager,
                        net_client.region,
                        net_client.endpoint_type,
                        **_params)
        cls.rbac_client = openstack_network_clients.\
            RBAC_Client(net_client.auth_provider,
                        net_client.service,
                        net_client.region,
                        net_client.endpoint_type,
                        **_params)
        net_client.service = 'load-balancer'
        cls.octavia_admin_client = openstack_network_clients.\
            OctaviaLB_Client(net_client.auth_provider,
@@ -169,6 +175,7 @@ class FeatureManager(traffic_manager.IperfManager,
                          net_client.region,
                          net_client.endpoint_type,
                          **_params)

        net_client.service = 'dns'
        cls.zones_v2_client = openstack_network_clients.ZonesV2Client(
            net_client.auth_provider,
@@ -1512,6 +1519,7 @@ class FeatureManager(traffic_manager.IperfManager,

        """
        oc_client = self.octavia_admin_client
        try:
            statuses = oc_client.show_octavia_lb_status_tree(lb_id)
            statuses = statuses.get('statuses', statuses)
            lb = statuses.get('loadbalancer')
@@ -1536,6 +1544,8 @@ class FeatureManager(traffic_manager.IperfManager,
            self.octavia_admin_client.\
                wait_for_load_balancer_status(lb_id,
                                              is_delete_op=True)
        except lib_exc.NotFound:
            pass

    def delete_octavia_lb_pool_resources(self, lb_id, pool):
        """Deletion of lbaas pool resources.
@@ -1575,7 +1585,8 @@ class FeatureManager(traffic_manager.IperfManager,
                               type=None, value=None, barbican=False,
                               barbican_container=None, invert=None,
                               qos_policy_id=None, external=None,
                               external_subnet=None, create_fip=None):
                               external_subnet=None, create_fip=None,
                               no_cleanup=False):
        count = 0
        lb_name = None
        if persistence:
@@ -1605,7 +1616,7 @@ class FeatureManager(traffic_manager.IperfManager,
            admin_state_up=True)['loadbalancer']
        lb_id = self.loadbalancer['id']
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        if clean_up is None:
        if not no_cleanup:
            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                            self.octavia_admin_client.
                            delete_octavia_load_balancer, lb_id)
@@ -1621,7 +1632,7 @@ class FeatureManager(traffic_manager.IperfManager,
            allowed_cidrs=allowed_cidrs,
            default_tls_container_ref=tls_id
            )['listener']
        if clean_up is None:
        if not no_cleanup:
            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                            self.octavia_admin_listener_client.
                            delete_octavia_listener,
@@ -1662,7 +1673,7 @@ class FeatureManager(traffic_manager.IperfManager,
            session_persistence=session_persistence)
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        pool_id = self.pool['pool']['id']
        if clean_up is None:
        if not no_cleanup:
            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                            self.octavia_admin_pools_client.
                            delete_octavia_pool,
@@ -1673,7 +1684,7 @@ class FeatureManager(traffic_manager.IperfManager,
            timeout=timeout, max_retries=max_retries,
            name=lb_name)
        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
        if clean_up is None:
        if not no_cleanup:
            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                            self.octavia_hm_client.
                            delete_octavia_hm,
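The hunks above replace the old `clean_up is None` checks with the new `no_cleanup` flag, so a caller can keep the load balancer, listener, pool and health monitor out of the automatic cleanup list. A minimal usage sketch, mirroring the cascade-delete test added later in this change (the surrounding test class and `port_id` are assumed to exist):

# Sketch only: build an LB whose resources are not auto-cleaned, then delete
# it explicitly with the cascade call exercised by the new test.
self.create_project_octavia(protocol_type="HTTPS", protocol_port="80",
                            lb_algorithm="ROUND_ROBIN",
                            vip_port_id=port_id, no_cleanup=True)
lb_id = self.loadbalancer['id']
self.octavia_admin_client.\
    delete_octavia_load_balancer_with_cascade(lb_id)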
@@ -1896,3 +1907,116 @@ class FeatureManager(traffic_manager.IperfManager,
                port_id=self.loadbalancer['vip_port_id'])
            self.vip_ip_address = vip_fip['floating_ip_address']
        return 1

    def get_status_lb_resources(self, lb_id):
        status = self.octavia_admin_client.show_octavia_lb_status_tree(lb_id)
        statuses = status['statuses']
        status_dict = {}
        status_dict['lb_operating'] = \
            statuses['loadbalancer']['operating_status']
        status_dict['provisioning_status'] = \
            statuses['loadbalancer']['provisioning_status']
        listeners = status['statuses']['loadbalancer']['listeners']
        status_dict['lstnr_op_status'] = \
            listeners[0]['operating_status']
        status_dict['lstnr_pr_status'] = \
            listeners[0]['provisioning_status']
        pools, hms = listeners[0]['pools'][0], \
            listeners[0]['pools'][0]['health_monitor']
        status_dict['pool_pr_status'] = pools['provisioning_status']
        status_dict['pool_op_status'] = pools['operating_status']
        status_dict['hm_pr_status'] = hms['provisioning_status']
        status_dict['hm_op_status'] = hms['operating_status']
        noerr = True
        for stat in list(status_dict.values()):
            if stat not in ('ACTIVE', 'ONLINE'):
                noerr = False
                break
        return noerr, status_dict

    def deploy_octavia_topology(self, no_of_servers=2,
                                image_id=None, slaac=False):
        kwargs = {'name': "router_lbaas",
                  'external_gateway_info':
                  {"network_id": CONF.network.public_network_id}}
        router_lbaas = self.cmgr_adm.routers_client.create_router(**kwargs)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.routers_client.delete_router,
                        router_lbaas['router']['id'])
        networks_client = self.cmgr_adm.networks_client
        name = "network_lbaas_1"
        network_lbaas_1 = self.\
            create_topology_network(name,
                                    networks_client=networks_client)
        sec_rule_client = self.cmgr_adm.security_group_rules_client
        sec_client = self.cmgr_adm.security_groups_client
        kwargs = dict(tenant_id=network_lbaas_1['tenant_id'],
                      security_group_rules_client=sec_rule_client,
                      security_groups_client=sec_client)
        self.sg = self.create_topology_security_group(**kwargs)
        lbaas_rules = [dict(direction='ingress', protocol='tcp',
                            port_range_min=constants.HTTP_PORT,
                            port_range_max=constants.HTTP_PORT, ),
                       dict(direction='ingress', protocol='tcp',
                            port_range_min=443, port_range_max=443, )]
        t_id = network_lbaas_1['tenant_id']
        for rule in lbaas_rules:
            self.add_security_group_rule(self.sg, rule,
                                         secclient=sec_client,
                                         ruleclient=sec_rule_client,
                                         tenant_id=t_id)
        if slaac:
            address_cidr = CONF.network.project_network_v6_cidr
            address_prefixlen = CONF.network.project_network_v6_mask_bits
            if address_prefixlen >= 126:
                msg = ("Subnet %s isn't large enough" % address_cidr)
                raise lib_exc.InvalidConfiguration(msg)
            body = {'ip_version': 6, 'ipv6_ra_mode': 'slaac',
                    'ipv6_address_mode': 'slaac', 'cidr': '2001:db8::/64',
                    "network_id": network_lbaas_1['id'],
                    'allocation_pools': [{
                        'start': str(address_cidr).split('/')[0] + '2',
                        'end': str(address_cidr).split('/')[0] + '70'}]}
            create_floating_ip = False
        else:
            body = {"network_id": network_lbaas_1['id'],
                    "allocation_pools":
                    [{"start": "2.0.0.2", "end": "2.0.0.254"}],
                    "ip_version": 4, "cidr": "2.0.0.0/24"}
            create_floating_ip = True
        subnet_client = self.cmgr_adm.subnets_client
        subnet_lbaas = subnet_client.create_subnet(**body)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnet_client.delete_subnet,
                        subnet_lbaas['subnet']['id'])
        self.cmgr_adm.routers_client.\
            add_router_interface(router_lbaas['router']['id'],
                                 subnet_id=subnet_lbaas['subnet']['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.routers_client.remove_router_interface,
                        router_lbaas['router']['id'],
                        subnet_id=subnet_lbaas['subnet']['id'])
        for instance in range(0, no_of_servers):
            self.create_topology_instance(
                "server_lbaas_%s" % instance, [network_lbaas_1],
                security_groups=[{'name': self.sg['name']}],
                image_id=image_id, clients=self.cmgr_adm,
                create_floating_ip=create_floating_ip)
        return dict(router=router_lbaas, subnet=subnet_lbaas,
                    network=network_lbaas_1)

    def verifyBackendStatus_octavia_lb(self, status):
        activeStatus = ("UP", "ACTIVE")
        if status['service_status'] not in activeStatus:
            return False
        for vs in status['virtual_servers']:
            if vs['status'] not in activeStatus:
                return False
        for pool in status['pools']:
            if pool['status'] not in activeStatus:
                return False
            for member in pool['members']:
                if member['status'] not in activeStatus:
                    return False
        return True
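get_status_lb_resources only reads a handful of keys from the tree returned by show_octavia_lb_status_tree. An abridged, illustrative payload (the values are examples, not taken from a real run) showing the nesting the helper walks:

# Illustrative shape of the 'statuses' tree consumed by get_status_lb_resources.
status = {'statuses': {'loadbalancer': {
    'operating_status': 'ONLINE',
    'provisioning_status': 'ACTIVE',
    'listeners': [{
        'operating_status': 'ONLINE',
        'provisioning_status': 'ACTIVE',
        'pools': [{
            'operating_status': 'ONLINE',
            'provisioning_status': 'ACTIVE',
            'health_monitor': {
                'operating_status': 'ONLINE',
                'provisioning_status': 'ACTIVE'}}]}]}}}

The helper returns noerr=False as soon as any of these values is not ACTIVE or ONLINE.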
@@ -509,3 +509,29 @@ class NSXPClient(object):
        response = self.get(endpoint=endpoint)
        res_json = response.json()
        return res_json

    def get_loadbalancers(self, backend_name):
        """
        Retrieve the detailed status of the LB service whose
        display name contains backend_name.
        """
        identity = [i['id'] for i in
                    self.get_logical_resources("lb-services")
                    if backend_name in i['display_name']]
        endpoint = "lb-services/{}/detailed-status".format(identity[0])
        return self.get_logical_resources(endpoint)

    def get_loadbalancer_virtualservers(self):
        """
        Retrieve all the logical load balancer virtual servers.
        """
        endpoint = "lb-virtual-servers"
        return self.get_logical_resources(endpoint)

    def get_loadbalancer_virtual_server(self, os_name, os_uuid):
        if not os_name or not os_uuid:
            LOG.error("Name and uuid of the OS load balancer are required "
                      "to query the backend logical load balancer")
            return None
        nsx_name = os_name + "_" + os_uuid[:5] + "..." + os_uuid[-5:]
        loadbalancers = self.get_loadbalancer_virtualservers()
        return self.get_nsx_resource_by_name(loadbalancers, nsx_name)
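The new get_loadbalancers helper matches the NSX lb-service by display_name and then fetches its detailed-status document. A short sketch of how the backend test added later in this change consumes it (assuming `self.nsxp` is an NSXPClient instance and `diction` is the dict returned by deploy_octavia_topology):

# Sketch: look the backend LB service up by the router-id prefix used in its
# display name, then verify every virtual server, pool and member is healthy.
backend_name = diction['router']['router']['id'][:5]
backend_status = self.nsxp.get_loadbalancers(backend_name=backend_name)[0]
self.assertTrue(self.verifyBackendStatus_octavia_lb(backend_status))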
@@ -868,3 +868,26 @@ class OctaviaL7RulesClient(base.BaseNetworkClient):
        uri = self.resource_base_path % l7policy_id
        post_data = {self.resource: kwargs}
        return self.create_resource(uri, post_data)


class RBAC_Client(base.BaseNetworkClient):
    """
    The client takes care of
    creating RBAC policies and
    listing the existing RBAC policies.
    """
    resource = 'rbac-policies'
    resource_base_path = '/%s' % resource

    def create_rbac_policy(self, **kwargs):
        uri = self.resource_base_path
        post_data = {"rbac_policy": kwargs}
        return self.create_resource(uri, post_data)

    def show_rbac_policies(self, **fields):
        uri = self.resource_base_path
        return self.show_resource(uri, **fields)
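A minimal usage sketch for the new client, which FeatureManager registers as cls.rbac_client; it mirrors the RBAC test added below, with `net_id` standing for the network to be shared:

# Sketch: share the tenant network with the admin project before the LB
# is created on top of it.
self.rbac_client.create_rbac_policy(action="access_as_shared",
                                    object_type="network",
                                    object_id=net_id,
                                    target_tenant="admin")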
@@ -22,6 +22,7 @@ from tempest.lib.common.utils import test_utils

from tempest.lib import decorators
from tempest.lib import exceptions
from tempest.lib import exceptions as lib_exc
from tempest import test

from vmware_nsx_tempest_plugin.common import constants
@@ -107,78 +108,6 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
        LOG.debug("tearDown lbaas exiting...")
        super(OctaviaRoundRobin, self).tearDown()

    def deploy_octavia_topology(self, no_of_servers=2,
                                image_id=None, slaac=False):
        kwargs = {'name': "router_lbaas",
                  'external_gateway_info':
                  {"network_id": CONF.network.public_network_id}}
        router_lbaas = self.cmgr_adm.routers_client.create_router(**kwargs)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.routers_client.delete_router,
                        router_lbaas['router']['id'])
        networks_client = self.cmgr_adm.networks_client
        name = "network_lbaas_1"
        network_lbaas_1 = self.\
            create_topology_network(name,
                                    networks_client=networks_client)
        sec_rule_client = self.cmgr_adm.security_group_rules_client
        sec_client = self.cmgr_adm.security_groups_client
        kwargs = dict(tenant_id=network_lbaas_1['tenant_id'],
                      security_group_rules_client=sec_rule_client,
                      security_groups_client=sec_client)
        self.sg = self.create_topology_security_group(**kwargs)
        lbaas_rules = [dict(direction='ingress', protocol='tcp',
                            port_range_min=constants.HTTP_PORT,
                            port_range_max=constants.HTTP_PORT, ),
                       dict(direction='ingress', protocol='tcp',
                            port_range_min=443, port_range_max=443, )]
        t_id = network_lbaas_1['tenant_id']
        for rule in lbaas_rules:
            self.add_security_group_rule(self.sg, rule,
                                         secclient=sec_client,
                                         ruleclient=sec_rule_client,
                                         tenant_id=t_id)
        if slaac:
            address_cidr = CONF.network.project_network_v6_cidr
            address_prefixlen = CONF.network.project_network_v6_mask_bits
            if ((address_prefixlen >= 126)):
                msg = ("Subnet %s isn't large" % address_cidr)
                raise exceptions.InvalidConfiguration(msg)
            body = {'ip_version': 6, 'ipv6_ra_mode': 'slaac',
                    'ipv6_address_mode': 'slaac', 'cidr': '2001:db8::/64',
                    "network_id": network_lbaas_1['id'],
                    'allocation_pools': [{
                        'start': str(address_cidr).split('/')[0] + '2',
                        'end': str(address_cidr).split('/')[0] + '70'}]}
            create_floating_ip = False
        else:
            body = {"network_id": network_lbaas_1['id'],
                    "allocation_pools":
                    [{"start": "2.0.0.2", "end": "2.0.0.254"}],
                    "ip_version": 4, "cidr": "2.0.0.0/24"}
            create_floating_ip = True
        subnet_client = self.cmgr_adm.subnets_client
        subnet_lbaas = subnet_client.create_subnet(**body)
        time.sleep(constants.NSX_NETWORK_REALISE_TIMEOUT)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnet_client.delete_subnet,
                        subnet_lbaas['subnet']['id'])
        self.cmgr_adm.routers_client.\
            add_router_interface(router_lbaas['router']['id'],
                                 subnet_id=subnet_lbaas['subnet']['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.routers_client.remove_router_interface,
                        router_lbaas['router']['id'],
                        subnet_id=subnet_lbaas['subnet']['id'])
        for instance in range(0, no_of_servers):
            self.create_topology_instance(
                "server_lbaas_%s" % instance, [network_lbaas_1],
                security_groups=[{'name': self.sg['name']}],
                image_id=image_id, clients=self.cmgr_adm,
                create_floating_ip=create_floating_ip)
        return dict(router=router_lbaas, subnet=subnet_lbaas,
                    network=network_lbaas_1)

    @decorators.attr(type='nsxv3')
    @decorators.idempotent_id('c5ac8546-6867-4b7a-8704-3844b11b1a34')
    def test_create_verify_octavia_lb_with_vip_subnet_id_rr(self):
@@ -1299,3 +1228,132 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
                                    l7policy=True,
                                    action='REDIRECT_TO_URL',
                                    fip_disassociate=True)

    @decorators.attr(type='nsxv3')
    @decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a34')
    def test_verify_octavia_lb_resource_status(self):
        """
        Fetch the status of the load balancer resources using the
        openstack loadbalancer status show <lb> API.
        """
        diction = self.deploy_octavia_topology()
        self.start_web_servers(constants.HTTP_PORT)
        net_id = diction['network']['id']
        port_id = self.cmgr_adm.ports_client.create_port(
            network_id=net_id)['port']['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.ports_client.delete_port, port_id)
        self.create_project_octavia(protocol_type="HTTPS", protocol_port="80",
                                    lb_algorithm="ROUND_ROBIN",
                                    vip_port_id=port_id, hm_type='PING',
                                    timeout=self.hm_timeout,
                                    max_retries=self.hm_max_retries,
                                    delay=self.hm_delay, default_pool=True)
        lb_id = self.loadbalancer['id']
        noerr, status_dict = self.get_status_lb_resources(lb_id)
        self.assertTrue(noerr, status_dict)

    @decorators.attr(type='nsxv3')
    @decorators.idempotent_id('ca5c4368-6768-4b7a-8704-3844b11b1a34')
    def test_verify_ipv6_octavia_lb_resource_status(self):
        """
        Fetch the status of an IPv6 LB with the show status API.
        """
        diction = self.deploy_octavia_topology()
        if not CONF.nsxv3.ens:
            self.start_web_servers(constants.HTTP_PORT)
        subnet_id = diction['subnet']['subnet']['id']
        self.create_project_octavia(protocol_type="HTTP",
                                    protocol_port="80",
                                    lb_algorithm="ROUND_ROBIN",
                                    vip_subnet_id=subnet_id,
                                    l7policy=True, action='REJECT',
                                    fip_disassociate=True, hm_type='PING',
                                    timeout=self.hm_timeout,
                                    max_retries=self.hm_max_retries,
                                    delay=self.hm_delay)
        lb_id = self.loadbalancer['id']
        noerr, status_dict = self.get_status_lb_resources(lb_id)
        self.assertTrue(noerr, status_dict)

    @decorators.attr(type='nsxv3')
    @decorators.idempotent_id('c5ac8546-6867-4b7a-8544-3843a11b1a34')
    def test_verify_rbac_network_octavia_lb_admin(self):
        """
        Create an LB on a network shared with the admin project
        through an RBAC policy.
        """
        diction = self.deploy_octavia_topology()
        self.start_web_servers(constants.HTTP_PORT)
        net_id = diction['network']['id']
        port_id = self.cmgr_adm.ports_client.create_port(
            network_id=net_id)['port']['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.ports_client.delete_port, port_id)
        self.rbac_client.create_rbac_policy(action="access_as_shared",
                                            object_type="network",
                                            object_id=net_id,
                                            target_tenant="admin")
        self.create_project_octavia(protocol_type="HTTPS", protocol_port="80",
                                    lb_algorithm="ROUND_ROBIN",
                                    vip_port_id=port_id, hm_type='PING',
                                    timeout=self.hm_timeout,
                                    max_retries=self.hm_max_retries,
                                    delay=self.hm_delay, default_pool=True)

    def test_delete_octavia_lb_with_cascade(self):
        """
        Create an octavia LB and delete it with the --cascade option.
        """
        diction = self.deploy_octavia_topology()
        self.start_web_servers(constants.HTTP_PORT)
        net_id = diction['network']['id']
        port_id = self.cmgr_adm.ports_client.create_port(
            network_id=net_id)['port']['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.ports_client.delete_port, port_id)
        self.create_project_octavia(protocol_type="HTTPS", protocol_port="80",
                                    lb_algorithm="ROUND_ROBIN",
                                    vip_port_id=port_id, hm_type='PING',
                                    timeout=self.hm_timeout,
                                    max_retries=self.hm_max_retries,
                                    delay=self.hm_delay, default_pool=True,
                                    no_cleanup=True)
        list_lb = []
        noerr = 0
        lb_id = self.loadbalancer['id']
        self.octavia_admin_client.\
            delete_octavia_load_balancer_with_cascade(lb_id)
        try:
            status = self.octavia_admin_client.\
                wait_for_load_balancer_status(lb_id)
            list_lb.append(lb_id)
            list_lb.append(status['listeners'][0]['id'])
            list_lb.append(status['pools'][0]['id'])
        except lib_exc.NotFound:
            noerr = 1
        self.assertTrue(noerr, list_lb)

    def test_create_update_verify_backend_octavia_lb(self):
        """
        Create an octavia LB and verify its status on the NSX backend.
        """
        diction = self.deploy_octavia_topology()
        self.start_web_servers(constants.HTTP_PORT)
        net_id = diction['network']['id']
        port_id = self.cmgr_adm.ports_client.create_port(
            network_id=net_id)['port']['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.cmgr_adm.ports_client.delete_port, port_id)
        self.create_project_octavia(protocol_type="HTTPS", protocol_port="80",
                                    lb_algorithm="ROUND_ROBIN",
                                    vip_port_id=port_id, hm_type='PING',
                                    timeout=self.hm_timeout,
                                    max_retries=self.hm_max_retries,
                                    delay=self.hm_delay, default_pool=True)
        time.sleep(constants.SLEEP_BETWEEN_VIRTUAL_SEREVRS_OPEARTIONS)
        backend_name = diction['router']['router']['id'][:5]
        backend_status = self.nsxp.\
            get_loadbalancers(backend_name=backend_name)[0]
        noerr = self.verifyBackendStatus_octavia_lb(backend_status)
        self.assertTrue(noerr, "Backend Verification failed")