
Part of the "Support Nested Stacks and Updates" story. To add nested stack support to Valet, compensate for missing Heat resource orchestration IDs in nested resources by generating a subset of Heat stack-lifecycle scheduler hints for each resource in advance, storing them as opaque metadata in Valet, and then leveraging that metadata at Nova scheduling time. Additional accommodations are made in anticipation of the complexities introduced by adding support for stack updates.

Change-Id: Ifed5b0f8172e522caf7e520f8131f23d4d336f4f
Story: #2001139
Task: #4855
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

class GroupResource(object):
|
|
"""Container for all groups."""
|
|
|
|
def __init__(self):
|
|
self.name = None
|
|
self.group_type = "AGGR"
|
|
|
|
self.metadata = {}
|
|
|
|
self.num_of_placed_vms = 0
|
|
# key = host (host or rack), value = num_of_placed_vms
|
|
self.num_of_placed_vms_per_host = {}
|
|
|
|
|
|
class Resource(object):
|
|
"""Resource."""
|
|
|
|
def __init__(self):
|
|
self.host_name = None
|
|
# all mapped groups to host
|
|
self.host_memberships = {}
|
|
# original total vCPUs before overcommit
|
|
self.host_vCPUs = 0
|
|
# remaining vCPUs after overcommit
|
|
self.host_avail_vCPUs = 0
|
|
# original total mem cap before overcommit
|
|
self.host_mem = 0
|
|
# remaining mem cap after
|
|
self.host_avail_mem = 0
|
|
# original total local disk cap before overcommit
|
|
self.host_local_disk = 0
|
|
# remaining local disk cap after overcommit
|
|
self.host_avail_local_disk = 0
|
|
# the number of vms currently placed in this host
|
|
self.host_num_of_placed_vms = 0
|
|
|
|
# where this host is located
|
|
self.rack_name = None
|
|
self.rack_memberships = {}
|
|
self.rack_vCPUs = 0
|
|
self.rack_avail_vCPUs = 0
|
|
self.rack_mem = 0
|
|
self.rack_avail_mem = 0
|
|
self.rack_local_disk = 0
|
|
self.rack_avail_local_disk = 0
|
|
self.rack_num_of_placed_vms = 0
|
|
|
|
# where this host and rack are located
|
|
self.cluster_name = None
|
|
|
|
self.cluster_memberships = {}
|
|
self.cluster_vCPUs = 0
|
|
self.cluster_avail_vCPUs = 0
|
|
self.cluster_mem = 0
|
|
self.cluster_avail_mem = 0
|
|
self.cluster_local_disk = 0
|
|
self.cluster_avail_local_disk = 0
|
|
self.cluster_num_of_placed_vms = 0
|
|
|
|
# level of placement
|
|
self.level = None
|
|
|
|
# order to place
|
|
self.sort_base = 0
|
|
|
|
def get_common_placement(self, _resource):
|
|
"""Get common placement level."""
|
|
"""Get the common level between this resource and the one
|
|
provided."""
|
|
level = None
|
|
|
|
if self.cluster_name != _resource.cluster_name:
|
|
level = "cluster"
|
|
else:
|
|
if self.rack_name != _resource.rack_name:
|
|
level = "rack"
|
|
else:
|
|
if self.host_name != _resource.host_name:
|
|
level = "host"
|
|
else:
|
|
level = "ANY"
|
|
|
|
return level
|
|
|
|
def get_resource_name(self, _level):
|
|
"""Get the name of this resource at the specified level."""
|
|
name = "unknown"
|
|
|
|
if _level == "cluster":
|
|
name = self.cluster_name
|
|
elif _level == "rack":
|
|
name = self.rack_name
|
|
elif _level == "host":
|
|
name = self.host_name
|
|
|
|
return name
|
|
|
|
def get_memberships(self, _level):
|
|
"""Get the memberships of this resource at the specified level."""
|
|
memberships = None
|
|
|
|
if _level == "cluster":
|
|
memberships = self.cluster_memberships
|
|
elif _level == "rack":
|
|
memberships = self.rack_memberships
|
|
elif _level == "host":
|
|
memberships = self.host_memberships
|
|
|
|
return memberships
|
|
|
|
def get_all_memberships(self, _level):
|
|
memberships = {}
|
|
|
|
if _level == "cluster":
|
|
for mk, m in self.cluster_memberships.iteritems():
|
|
memberships[mk] = m
|
|
for mk, m in self.rack_memberships.iteritems():
|
|
memberships[mk] = m
|
|
for mk, m in self.host_memberships.iteritems():
|
|
memberships[mk] = m
|
|
elif _level == "rack":
|
|
for mk, m in self.rack_memberships.iteritems():
|
|
memberships[mk] = m
|
|
for mk, m in self.host_memberships.iteritems():
|
|
memberships[mk] = m
|
|
elif _level == "host":
|
|
for mk, m in self.host_memberships.iteritems():
|
|
memberships[mk] = m
|
|
|
|
return memberships
|
|
|
|
def get_num_of_placed_vms(self, _level):
|
|
"""Get the number of placed vms of this resource at a given level."""
|
|
num_of_vms = 0
|
|
|
|
if _level == "cluster":
|
|
num_of_vms = self.cluster_num_of_placed_vms
|
|
elif _level == "rack":
|
|
num_of_vms = self.rack_num_of_placed_vms
|
|
elif _level == "host":
|
|
num_of_vms = self.host_num_of_placed_vms
|
|
|
|
return num_of_vms
|
|
|
|
def get_avail_resources(self, _level):
|
|
"""Get available resources of this resource at a given level.
|
|
|
|
Returns the available vCPUs, memory, local disk of this resource
|
|
the specified level.
|
|
"""
|
|
avail_vCPUs = 0
|
|
avail_mem = 0
|
|
avail_local_disk = 0
|
|
|
|
if _level == "cluster":
|
|
avail_vCPUs = self.cluster_avail_vCPUs
|
|
avail_mem = self.cluster_avail_mem
|
|
avail_local_disk = self.cluster_avail_local_disk
|
|
elif _level == "rack":
|
|
avail_vCPUs = self.rack_avail_vCPUs
|
|
avail_mem = self.rack_avail_mem
|
|
avail_local_disk = self.rack_avail_local_disk
|
|
elif _level == "host":
|
|
avail_vCPUs = self.host_avail_vCPUs
|
|
avail_mem = self.host_avail_mem
|
|
avail_local_disk = self.host_avail_local_disk
|
|
|
|
return (avail_vCPUs, avail_mem, avail_local_disk)
|
|
|
|
def get_local_disk(self, _level):
|
|
"""Get the local disk information.
|
|
|
|
Returns the local disk and available local disk of this resource
|
|
at the specified level.
|
|
"""
|
|
local_disk = 0
|
|
avail_local_disk = 0
|
|
|
|
if _level == "cluster":
|
|
local_disk = self.cluster_local_disk
|
|
avail_local_disk = self.cluster_avail_local_disk
|
|
elif _level == "rack":
|
|
local_disk = self.rack_local_disk
|
|
avail_local_disk = self.rack_avail_local_disk
|
|
elif _level == "host":
|
|
local_disk = self.host_local_disk
|
|
avail_local_disk = self.host_avail_local_disk
|
|
|
|
return (local_disk, avail_local_disk)
|
|
|
|
def get_vCPUs(self, _level):
|
|
"""Get the vCPUs information.
|
|
|
|
Returns the vCPUs and available vCPUs of this resource at the
|
|
specified level.
|
|
"""
|
|
vCPUs = 0
|
|
avail_vCPUs = 0
|
|
|
|
if _level == "cluster":
|
|
vCPUs = self.cluster_vCPUs
|
|
avail_vCPUs = self.cluster_avail_vCPUs
|
|
elif _level == "rack":
|
|
vCPUs = self.rack_vCPUs
|
|
avail_vCPUs = self.rack_avail_vCPUs
|
|
elif _level == "host":
|
|
vCPUs = self.host_vCPUs
|
|
avail_vCPUs = self.host_avail_vCPUs
|
|
|
|
return (vCPUs, avail_vCPUs)
|
|
|
|
def get_mem(self, _level):
|
|
"""Get memory information.
|
|
|
|
Returns the memory and available memory of this resource at the
|
|
specified level.
|
|
"""
|
|
mem = 0
|
|
avail_mem = 0
|
|
|
|
if _level == "cluster":
|
|
mem = self.cluster_mem
|
|
avail_mem = self.cluster_avail_mem
|
|
elif _level == "rack":
|
|
mem = self.rack_mem
|
|
avail_mem = self.rack_avail_mem
|
|
elif _level == "host":
|
|
mem = self.host_mem
|
|
avail_mem = self.host_avail_mem
|
|
|
|
return (mem, avail_mem)
|