Merge "Delete subports from random trunks dynamic workload"
commit cd38bcb85b

@@ -575,11 +575,13 @@ workloads:
 num_trunk_vms: 1
 num_add_subports_trunks: 1
 num_add_subports: 1
+num_delete_subports_trunks: 1
+num_delete_subports: 1
 # workloads can be 'all', a single workload(Eg. : create_delete_servers),
 # or a comma separated string(Eg. : create_delete_servers,migrate_servers).
 # Currently supported workloads : create_delete_servers, migrate_servers
-# create_loadbalancers, pod_fip_simulation, add_subports_to_random_trunks
-# delete_loadbalancers,delete_members_random_lb
+# create_loadbalancers, pod_fip_simulation, add_subports_to_random_trunks,
+# delete_subports_from_random_trunks, delete_loadbalancers, delete_members_random_lb
 # Note: Octavia scenarios are not included in 'all' by default, and have
 # to be included separately.
 workloads: all

@@ -28,6 +28,7 @@ Functions:
 - get_server_by_trunk: Get server details for a given trunk
 - pod_fip_simulation: Simulate pods with floating ips using subports on trunks and VMs
 - add_subports_to_random_trunks: Add 'N' subports to 'M' randomly chosen trunks
+- delete_subports_from_random_trunks: Delete 'N' subports from 'M' randomly chosen trunks
 - _boot_server_with_tag: Boot a server with a tag
 - _boot_server_with_fip_and_tag: Boot server prepared for SSH actions, with tag
 - _get_servers_by_tag: Retrieve list of servers based on tag

@@ -47,9 +47,9 @@ class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
     def run(
         self, smallest_image, smallest_flavor, ext_net_id, num_vms_to_create_for_migration,
         num_vms_to_migrate, trunk_image, trunk_flavor, num_initial_subports, num_trunk_vms,
-        num_add_subports, num_add_subports_trunks, octavia_image, octavia_flavor, user,
-        user_data_file, num_lbs, num_pools, num_clients,delete_num_lbs,
-        delete_num_members, num_create_delete_vms, workloads="all",
+        num_add_subports, num_add_subports_trunks, num_delete_subports, num_delete_subports_trunks,
+        octavia_image, octavia_flavor, user, user_data_file, num_lbs, num_pools,
+        num_clients, delete_num_lbs, delete_num_members, num_create_delete_vms, workloads="all",
         router_create_args=None, network_create_args=None,
         subnet_create_args=None, **kwargs):
 

@@ -72,6 +72,9 @@ class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
         if workloads == "all" or "add_subports_to_random_trunks" in workloads_list:
             self.add_subports_to_random_trunks(num_add_subports_trunks, num_add_subports)
 
+        if workloads == "all" or "delete_subports_from_random_trunks" in workloads_list:
+            self.delete_subports_from_random_trunks(num_delete_subports_trunks, num_delete_subports)
+
         if "create_loadbalancers" in workloads_list:
             self.create_loadbalancers(octavia_image, octavia_flavor, user, num_lbs, user_data_file,
                                       num_pools, num_clients, ext_net_id, router_create_args,
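
Both branches above test membership in workloads_list, which run() derives from the comma separated workloads string documented in the config file. A minimal stand-alone sketch of that selection logic; the splitting itself happens outside this hunk, so its exact form here is assumed:

    # Hypothetical sketch: how a comma separated workloads string selects scenarios.
    workloads = "add_subports_to_random_trunks,delete_subports_from_random_trunks"
    workloads_list = workloads.split(",")

    if workloads == "all" or "delete_subports_from_random_trunks" in workloads_list:
        print("delete_subports_from_random_trunks would run")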

@@ -19,6 +19,8 @@
 {% set num_trunk_vms = num_trunk_vms or 1 %}
 {% set num_add_subports_trunks = num_add_subports_trunks or 1 %}
 {% set num_add_subports = num_add_subports or 1 %}
+{% set num_delete_subports_trunks = num_delete_subports_trunks or 1 %}
+{% set num_delete_subports = num_delete_subports or 1 %}
 {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
 {% set sla_max_failure = sla_max_failure or 0 %}
 {% set sla_max_seconds = sla_max_seconds or 60 %}
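
Each {% set x = x or <default> %} line makes the corresponding knob optional when the template is rendered. A small illustration of that defaulting, assuming the jinja2 package is available:

    from jinja2 import Template

    # Stand-alone check of the "or" defaulting pattern used above (assumes jinja2 is installed).
    t = Template("{% set num_delete_subports = num_delete_subports or 1 %}"
                 "{{ num_delete_subports }}")
    print(t.render())                        # "1": undefined variable falls back to the default
    print(t.render(num_delete_subports=5))   # "5": a caller-supplied value wins

As with any "or" default, an explicit 0 would also fall back to 1.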

@@ -50,6 +52,8 @@ BrowbeatPlugin.dynamic_workload:
 num_trunk_vms: {{num_trunk_vms}}
 num_add_subports_trunks: {{num_add_subports_trunks}}
 num_add_subports: {{num_add_subports}}
+num_delete_subports_trunks: {{num_delete_subports_trunks}}
+num_delete_subports: {{num_delete_subports}}
 network_create_args: {}
 router_create_args: {}
 subnet_create_args: {}

@@ -108,19 +108,19 @@ class TrunkDynamicScenario(
             trunk["trunk"]["sub_ports"][subport_number_for_route-1]["port_id"])
         subnet_for_route = self.clients("neutron").show_subnet(
             subport_for_route["port"]["fixed_ips"][0]["subnet_id"])
-        self.add_route_from_vm_to_jumphost(vm_fip, jump_fip, "centos",
+        self.add_route_from_vm_to_jumphost(vm_fip, jump_fip, self.trunk_vm_user,
                                            subport_number_for_route,
                                            subnet_for_route["subnet"]["gateway_ip"])
         subport_fip = self._create_floatingip(self.ext_net_name)["floatingip"]
-        self.ping_subport_fip_from_jumphost(vm_fip, jump_fip, "centos", "cirros",
-                                            subport_fip,
+        self.ping_subport_fip_from_jumphost(vm_fip, jump_fip, self.trunk_vm_user,
+                                            self.jumphost_user, subport_fip,
                                             subport_for_route["port"])
         # We delete the route from vm to jumphost through the randomly
         # chosen subport after simulate subport connection is executed,
         # as additional subports can be tested for connection in the
         # add_subports_random_trunks function, and we would not want the
         # existing route created here to be used for those subports.
-        self.delete_route_from_vm_to_jumphost(vm_fip, jump_fip, "centos",
+        self.delete_route_from_vm_to_jumphost(vm_fip, jump_fip, self.trunk_vm_user,
                                               subport_number_for_route,
                                               subnet_for_route["subnet"]["gateway_ip"])
         # Dissociate floating IP as the same subport can be used again

@@ -144,7 +144,8 @@ class TrunkDynamicScenario(
         :param trunk: dict, trunk details
         :returns: floating ip of jumphost
         """
-        jumphost_fip = trunk["description"][9:]
+        if trunk["description"].startswith("jumphost:"):
+            jumphost_fip = trunk["description"][9:]
         return jumphost_fip
 
     def create_subnets_and_subports(self, subport_count):
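
The guard works because the jumphost floating IP is stored in the trunk description behind the fixed prefix "jumphost:", which is nine characters long, hence the [9:] slice. A small sketch of the convention with a made-up address:

    description = "jumphost:192.0.2.10"   # hypothetical trunk description
    prefix = "jumphost:"                  # len(prefix) == 9, which is what the [9:] slice relies on
    if description.startswith(prefix):
        jumphost_fip = description[len(prefix):]
        print(jumphost_fip)               # 192.0.2.10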

@@ -178,18 +179,19 @@ class TrunkDynamicScenario(
         """
         # Inside VM, subports are simulated (implemented) using vlan interfaces
         # Later we ping these vlan interfaces
-        for seg_id, p in enumerate(subports, start=start_seg_id):
+        for seg_id, subport in enumerate(subports,
+                                         start=start_seg_id):
             subport_payload = [
                 {
-                    "port_id": p["port"]["id"],
+                    "port_id": subport["port"]["id"],
                     "segmentation_type": "vlan",
                     "segmentation_id": seg_id,
                 }
             ]
             self._add_subports_to_trunk(trunk_id, subport_payload)
 
-            mac = p["port"]["mac_address"]
-            address = p["port"]["fixed_ips"][0]["ip_address"]
+            mac = subport["port"]["mac_address"]
+            address = subport["port"]["fixed_ips"][0]["ip_address"]
             # Note: Manually assign ip as calling dnsmasq will also add
             # default route which will break floating ip for the VM
             cmd = f"sudo ip link add link eth0 name eth0.{seg_id} type vlan id {seg_id}"
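
Here enumerate(subports, start=start_seg_id) pairs each new subport with a VLAN segmentation id, and the ip link command creates a matching eth0.<seg_id> interface inside the VM. A stand-alone illustration with faked subport records:

    # Faked subport data, only to show how segmentation ids map to vlan interface names.
    subports = [{"port": {"id": "port-a"}}, {"port": {"id": "port-b"}}]
    start_seg_id = 3   # e.g. the trunk already carries two subports

    for seg_id, subport in enumerate(subports, start=start_seg_id):
        print(subport["port"]["id"], f"eth0.{seg_id}")
    # port-a eth0.3
    # port-b eth0.4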

@@ -218,6 +220,9 @@ class TrunkDynamicScenario(
             "name"
         ]
 
+        self.trunk_vm_user = "centos"
+        self.jumphost_user = "cirros"
+
         router_create_args = {}
         router_create_args["name"] = self.generate_random_name()
         router_create_args["tenant_id"] = self.context["tenant"]["id"]

@@ -262,7 +267,8 @@ class TrunkDynamicScenario(
 
         subnets, subports = self.create_subnets_and_subports(subport_count)
 
-        vm_ssh = sshutils.SSH("centos", vm_fip, pkey=self.keypair["private"])
+        vm_ssh = sshutils.SSH(self.trunk_vm_user,
+                              vm_fip, pkey=self.keypair["private"])
         self._wait_for_ssh(vm_ssh)
 
         self.add_subports_to_trunk_and_vm(subports, trunk["trunk"]["id"], vm_ssh, 1)

@@ -287,7 +293,7 @@ class TrunkDynamicScenario(
         jump_fip = self.get_jumphost_by_trunk(trunk)
 
         vm_ssh = sshutils.SSH(
-            "centos", trunk_server_fip, pkey=self.keypair["private"]
+            self.trunk_vm_user, trunk_server_fip, pkey=self.keypair["private"]
         )
         self._wait_for_ssh(vm_ssh)
 

@@ -295,3 +301,62 @@ class TrunkDynamicScenario(
                                           vm_ssh, len(trunk["sub_ports"])+1)
 
         self.simulate_subport_connection(trunk["id"], trunk_server_fip, jump_fip)
+
+    def delete_subports_from_random_trunks(self, num_trunks, subport_count):
+        """Delete <<subport_count>> subports from <<num_trunks>> randomly chosen trunks
+        :param num_trunks: int, number of trunks to be randomly chosen
+        :param subport_count: int, number of subports to delete from each trunk
+        """
+        trunks = self._list_trunks()
+
+        eligible_trunks = [trunk for trunk in trunks if len(trunk['sub_ports']) >= subport_count]
+        num_trunks = min(num_trunks, len(trunks))
+        random.shuffle(eligible_trunks)
+
+        if len(eligible_trunks) >= num_trunks:
+            trunks_to_delete_subports = [eligible_trunks[i] for i in range(num_trunks)]
+        else:
+            trunks_to_delete_subports = sorted(trunks,
+                                               key=lambda k: -len(k['sub_ports']))[:num_trunks]
+            subport_count = len(trunks_to_delete_subports[-1]['sub_ports'])
+
+        for trunk in trunks_to_delete_subports:
+            trunk_server_fip = self.get_server_by_trunk(trunk)
+            jump_fip = self.get_jumphost_by_trunk(trunk)
+
+            vm_ssh = sshutils.SSH(self.trunk_vm_user,
+                                  trunk_server_fip, pkey=self.keypair["private"])
+            self._wait_for_ssh(vm_ssh)
+
+            trunk_subports = trunk['sub_ports']
+            num_trunk_subports = len(trunk_subports)
+
+            # We delete subports from trunks starting from the last subport,
+            # instead of randomly. This is because deleting random subports
+            # might cause a lot of conflict with the add_subports_to_random_
+            # trunks function.
+            for subport_number in range(num_trunk_subports-1,
+                                        num_trunk_subports-1-subport_count, -1):
+                subport_details = trunk_subports[subport_number]
+                subport_to_delete = self.clients("neutron").show_port(subport_details["port_id"])
+                subport_payload = [
+                    {
+                        "port_id": subport_to_delete["port"]["id"],
+                        "segmentation_type": "vlan",
+                        "segmentation_id": subport_number+1,
+                    }
+                ]
+
+                cmd = f"sudo ip link delete eth0.{subport_number+1}"
+                self._run_command_with_attempts(vm_ssh, cmd)
+                self.clients("neutron").trunk_remove_subports(trunk["id"],
+                                                              {"sub_ports": subport_payload})
+                self.clients("neutron").delete_port(subport_to_delete["port"]["id"])
+
+            # Check the number of subports present in trunk after deletion,
+            # and simulate subport connection if it is > 0. We use the
+            # show_trunk function here to get updated information about the
+            # trunk, as the trunk loop variable will have whatever information
+            # was valid at the beginning of the loop.
+            if len(self.clients("neutron").show_trunk(trunk["id"])["trunk"]["sub_ports"]) > 0:
+                self.simulate_subport_connection(trunk["id"], trunk_server_fip, jump_fip)
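
The reversed range() walks each trunk's subport list from the tail, so the last subports in the list (the most recently added ones, given how segmentation ids are assigned above) are removed first along with their eth0.<n> vlan interfaces. A worked example of the indices visited and the commands they produce:

    # Stand-alone arithmetic check of the deletion order; numbers are illustrative only.
    num_trunk_subports = 5
    subport_count = 2

    for subport_number in range(num_trunk_subports - 1,
                                num_trunk_subports - 1 - subport_count, -1):
        print(subport_number, f"sudo ip link delete eth0.{subport_number + 1}")
    # 4 sudo ip link delete eth0.5
    # 3 sudo ip link delete eth0.4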