Use placement for unified resource management

Placement REST API integration

Co-Authored-By: Hongbin Lu <hongbin034@gmail.com>
Change-Id: I81fbadd030e486516c6bdcf0526e89a5d58d3e26
Implements: blueprint use-placement-resource-management
Authored by Feng Shengqin on 2018-07-30 14:56:25 +08:00; committed by Hongbin Lu
parent 23b3b7cee1
commit 0fb82e70a8
27 changed files with 8333 additions and 13 deletions


@@ -66,8 +66,10 @@ openstacksdk==0.12.0
os-api-ref==1.4.0
os-brick==2.2.0
os-client-config==1.29.0
os-resource-classes==0.1.0
os-service-types==1.2.0
os-testr==1.0.0
os-traits==0.15.0
os-win==4.0.0
osc-lib==1.10.0
oslo.cache==1.29.0
@@ -84,8 +86,8 @@ oslo.privsep==1.32.0
oslo.serialization==2.18.0
oslo.service==1.24.0
oslo.upgradecheck==0.1.0
oslo.utils==3.33.0
oslo.versionedobjects==1.31.2
oslo.utils==3.37.0
oslo.versionedobjects==1.35.0
oslo.rootwrap==5.8.0
oslotest==3.2.0
osprofiler==1.4.0


@@ -20,13 +20,15 @@ oslo.policy>=1.30.0 # Apache-2.0
oslo.privsep>=1.32.0 # Apache-2.0
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
oslo.service!=1.28.1,>=1.24.0 # Apache-2.0
oslo.versionedobjects>=1.31.2 # Apache-2.0
oslo.versionedobjects>=1.35.0 # Apache-2.0
oslo.context>=2.19.2 # Apache-2.0
oslo.utils>=3.33.0 # Apache-2.0
oslo.utils>=3.37.0 # Apache-2.0
oslo.db>=4.27.0 # Apache-2.0
oslo.rootwrap>=5.8.0 # Apache-2.0
oslo.upgradecheck>=0.1.0 # Apache-2.0
os-brick>=2.2.0 # Apache-2.0
os-resource-classes>=0.1.0 # Apache-2.0
os-traits>=0.15.0 # Apache-2.0
six>=1.10.0 # MIT
SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT
stevedore>=1.20.0 # Apache-2.0


@@ -14,6 +14,7 @@
from cinderclient import client as cinderclient
from glanceclient import client as glanceclient
from keystoneauth1.loading import adapter as ka_adapter
from neutronclient.v2_0 import client as neutronclient
from zun.common import exception
@@ -30,6 +31,8 @@ class OpenStackClients(object):
self._glance = None
self._neutron = None
self._cinder = None
self._placement = None
self._placement_ks_filter = None
def url_for(self, **kwargs):
return self.keystone().session.get_endpoint(**kwargs)
@@ -115,3 +118,26 @@ class OpenStackClients(object):
**kwargs)
return self._cinder
@exception.wrap_keystone_exception
def placement(self):
if self._placement:
return self._placement, self._placement_ks_filter
session = self.keystone().session
session.verify = \
self._get_client_option('placement', 'ca_file') or True
if self._get_client_option('placement', 'insecure'):
session.verify = False
region_name = self._get_client_option('placement', 'region_name')
endpoint_type = self._get_client_option('placement', 'endpoint_type')
kwargs = {
'session': self.keystone().session,
'auth': self.keystone().auth,
}
self._placement_ks_filter = {'service_type': 'placement',
'region_name': region_name,
'interface': endpoint_type}
self._placement = ka_adapter.Adapter().load_from_options(**kwargs)
return self._placement, self._placement_ks_filter
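For context, a minimal sketch of how the returned (adapter, filter) pair is meant to be consumed; `osc` is an assumed OpenStackClients instance, and the path and microversion are illustrative:

    # Minimal sketch, assuming `osc` is an OpenStackClients instance.
    client, ks_filter = osc.placement()

    # endpoint_filter routes the request to the placement endpoint in the
    # service catalog; microversion pins the placement API version.
    resp = client.get('/resource_providers',
                      endpoint_filter=ks_filter,
                      microversion='1.25',
                      raise_exc=False)
    if resp.status_code == 200:
        providers = resp.json()['resource_providers']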


@@ -423,10 +423,6 @@ class ZunServiceNotFound(HTTPNotFound):
message = _("Zun service %(binary)s on host %(host)s could not be found.")
class ResourceProviderNotFound(HTTPNotFound):
message = _("Resource provider %(resource_provider)s could not be found.")
class ResourceClassNotFound(HTTPNotFound):
message = _("Resource class %(resource_class)s could not be found.")
@@ -776,3 +772,113 @@ class NameEmpty(InvalidReference):
class NameTooLong(InvalidReference):
message = _('repository name must not be more than %(length_max)s '
'characters')
# An exception with this name is used on both sides of the placement/zun
# interaction.
class ResourceProviderInUse(ZunException):
message = _("Resource provider has allocations.")
class ResourceProviderRetrievalFailed(ZunException):
message = _("Failed to get resource provider with UUID %(uuid)s")
class ResourceProviderAggregateRetrievalFailed(ZunException):
message = _("Failed to get aggregates for resource provider with UUID"
" %(uuid)s")
class ResourceProviderTraitRetrievalFailed(ZunException):
message = _("Failed to get traits for resource provider with UUID"
" %(uuid)s")
class ResourceProviderCreationFailed(ZunException):
message = _("Failed to create resource provider %(name)s")
class ResourceProviderDeletionFailed(ZunException):
message = _("Failed to delete resource provider %(uuid)s")
class ResourceProviderUpdateFailed(ZunException):
message = _("Failed to update resource provider via URL %(url)s: "
"%(error)s")
class ResourceProviderNotFound(NotFound):
message = _("No such resource provider %(name_or_uuid)s.")
class ResourceProviderSyncFailed(ZunException):
message = _("Failed to synchronize the placement service with resource "
"provider information supplied by the compute host.")
class PlacementAPIConnectFailure(ZunException):
message = _("Unable to communicate with the Placement API.")
class PlacementAPIConflict(ZunException):
"""Any 409 error from placement APIs should use (a subclass of) this
exception.
"""
message = _("A conflict was encountered attempting to invoke the "
"placement API at URL %(url)s: %(error)s")
class ResourceProviderUpdateConflict(PlacementAPIConflict):
"""A 409 caused by generation mismatch from attempting to update an
existing provider record or its associated data (aggregates, traits, etc.).
"""
message = _("A conflict was encountered attempting to update resource "
"provider %(uuid)s (generation %(generation)d): %(error)s")
class InvalidResourceClass(Invalid):
message = _("Resource class '%(resource_class)s' invalid.")
class InvalidResourceAmount(Invalid):
message = _("Resource amounts must be integers. Received '%(amount)s'.")
class InvalidInventory(Invalid):
message = _("Inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s' invalid.")
class UsagesRetrievalFailed(ZunException):
message = _("Failed to retrieve usages for project '%(project_id)s' and "
"user '%(user_id)s'.")
class AllocationUpdateFailed(ZunException):
message = _('Failed to update allocations for consumer %(consumer_uuid)s. '
'Error: %(error)s')
class ConsumerAllocationRetrievalFailed(ZunException):
message = _("Failed to retrieve allocations for consumer "
"%(consumer_uuid)s: %(error)s")
class TraitRetrievalFailed(ZunException):
message = _("Failed to get traits for resource provider with UUID"
" %(uuid)s")
class TraitCreationFailed(ZunException):
message = _("Failed to create trait %(name)s: %(error)s")
class AllocationMoveFailed(ZunException):
message = _('Failed to move allocations from consumer %(source_consumer)s '
'to consumer %(target_consumer)s. '
'Error: %(error)s')
class ResourceProviderAllocationRetrievalFailed(ZunException):
message = _("Failed to retrieve allocations for resource provider "
"%(rp_uuid)s: %(error)s")
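A quick sketch of how the new exception classes are used, assuming the standard ZunException behaviour of interpolating keyword arguments into the message template:

    from zun.common import exception

    try:
        raise exception.ResourceProviderCreationFailed(name='compute-node-1')
    except exception.ResourceProviderCreationFailed as e:
        # kwargs are interpolated into the class message template
        print(e)  # Failed to create resource provider compute-node-1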


@@ -33,6 +33,7 @@ class KeystoneClientV3(object):
self.context = context
self._client = None
self._session = None
self._auth = None
@property
def auth_url(self):
@@ -55,6 +56,14 @@
self._session = session
return session
@property
def auth(self):
if self._auth:
return self._auth
auth = self._get_auth()
self._auth = auth
return auth
def _get_session(self, auth):
session = ka_loading.load_session_from_conf_options(
CONF, ksconf.CFG_GROUP, auth=auth)


@@ -18,6 +18,7 @@
"""Utilities and helper functions."""
import base64
import binascii
import contextlib
import eventlet
import functools
import inspect
@@ -716,3 +717,12 @@ def decode_file_data(data):
def strtime(at):
return at.strftime("%Y-%m-%dT%H:%M:%S.%f")
if six.PY2:
nested_contexts = contextlib.nested
else:
@contextlib.contextmanager
def nested_contexts(*contexts):
with contextlib.ExitStack() as stack:
yield [stack.enter_context(c) for c in contexts]
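A minimal sketch of the new helper: contexts are entered left to right and exited in reverse order, matching the Python 2 contextlib.nested behavior:

    import contextlib

    from zun.common import utils

    @contextlib.contextmanager
    def tracer(name):
        print('enter', name)
        yield name
        print('exit', name)

    with utils.nested_contexts(tracer('a'), tracer('b')) as (a, b):
        print('inside', a, b)
    # enter a / enter b / inside a b / exit b / exit a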


@@ -23,7 +23,7 @@ from zun.compute import container_actions
from zun.compute import rpcapi
import zun.conf
from zun import objects
from zun.scheduler import client as scheduler_client
from zun.scheduler.client import query as scheduler_client
CONF = zun.conf.CONF


@@ -24,7 +24,7 @@ import zun.conf
from zun import objects
from zun.objects import base as obj_base
from zun.pci import manager as pci_manager
from zun.scheduler import client as scheduler_client
from zun.scheduler.client import query as scheduler_client
CONF = zun.conf.CONF


@@ -0,0 +1,680 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An object describing a tree of resource providers and their inventories.
This object is not stored in the Zun API; rather, this
object is constructed and used by the scheduler report client to track state
changes for resources on the container engine. As such, there are
no remoteable methods nor is there any interaction with the zun.db modules.
"""
import collections
import copy
import os_traits
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import uuidutils
from zun.common.i18n import _
LOG = logging.getLogger(__name__)
_LOCK_NAME = 'provider-tree-lock'
# Point-in-time representation of a resource provider in the tree.
# Note that, whereas namedtuple enforces read-only-ness of containers as a
# whole, nothing prevents modification of the internals of attributes of
# complex types (children/inventory/traits/aggregates). However, any such
# modifications still have no effect on the ProviderTree the container came
# from. Like, you can Sharpie a moustache on a Polaroid of my face, but that
# doesn't make a moustache appear on my actual face.
ProviderData = collections.namedtuple(
'ProviderData', ['uuid', 'name', 'generation', 'parent_uuid', 'inventory',
'traits', 'aggregates'])
class _Provider(object):
"""Represents a resource provider in the tree. All operations against the
tree should be done using the ProviderTree interface, since it controls
thread-safety.
"""
def __init__(self, name, uuid=None, generation=None, parent_uuid=None):
if uuid is None:
uuid = uuidutils.generate_uuid()
self.uuid = uuid
self.name = name
self.generation = generation
self.parent_uuid = parent_uuid
# Contains a dict, keyed by uuid of child resource providers having
# this provider as a parent
self.children = {}
# dict of inventory records, keyed by resource class
self.inventory = {}
# Set of trait names
self.traits = set()
# Set of aggregate UUIDs
self.aggregates = set()
@classmethod
def from_dict(cls, pdict):
"""Factory method producing a _Provider based on a dict with
appropriate keys.
:param pdict: Dictionary representing a provider, with keys 'name',
'uuid', 'generation', 'parent_provider_uuid'. Of these,
only 'name' is mandatory.
"""
return cls(pdict['name'], uuid=pdict.get('uuid'),
generation=pdict.get('generation'),
parent_uuid=pdict.get('parent_provider_uuid'))
def data(self):
inventory = copy.deepcopy(self.inventory)
traits = copy.copy(self.traits)
aggregates = copy.copy(self.aggregates)
return ProviderData(
self.uuid, self.name, self.generation, self.parent_uuid,
inventory, traits, aggregates)
def get_provider_uuids(self):
"""Returns a list, in top-down traversal order, of UUIDs of this
provider and all its descendants.
"""
ret = [self.uuid]
for child in self.children.values():
ret.extend(child.get_provider_uuids())
return ret
def find(self, search):
if self.name == search or self.uuid == search:
return self
if search in self.children:
return self.children[search]
if self.children:
for child in self.children.values():
# We already searched for the child by UUID above, so here we
# just check for a child name match
if child.name == search:
return child
subchild = child.find(search)
if subchild:
return subchild
return None
def add_child(self, provider):
self.children[provider.uuid] = provider
def remove_child(self, provider):
if provider.uuid in self.children:
del self.children[provider.uuid]
def has_inventory(self):
"""Returns whether the provider has any inventory records at all. """
return self.inventory != {}
def has_inventory_changed(self, new):
"""Returns whether the inventory has changed for the provider."""
cur = self.inventory
if set(cur) != set(new):
return True
for key, cur_rec in cur.items():
new_rec = new[key]
# If the new record contains new fields (e.g. we're adding on
# `reserved` or `allocation_ratio`) we want to make sure to pick
# them up
if set(new_rec) - set(cur_rec):
return True
for rec_key, cur_val in cur_rec.items():
if rec_key not in new_rec:
# Deliberately don't want to compare missing keys in the
# *new* inventory record. For instance, we will be passing
# in fields like allocation_ratio in the current dict but
# the resource tracker may only pass in the total field. We
# want to return that inventory didn't change when the
# total field values are the same even if the
# allocation_ratio field is missing from the new record.
continue
if new_rec[rec_key] != cur_val:
return True
return False
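To make the asymmetry above concrete, a sketch against the (private) _Provider class: a key missing from the new record is ignored, while a key new to the record counts as a change:

    p = _Provider('cn1')
    p.update_inventory({'VCPU': {'total': 8, 'allocation_ratio': 16.0}},
                       generation=1)
    # 'allocation_ratio' absent from the new record: not a change
    assert not p.has_inventory_changed({'VCPU': {'total': 8}})
    # 'reserved' is a field the current record lacks: a change
    assert p.has_inventory_changed({'VCPU': {'total': 8, 'reserved': 1}})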
def _update_generation(self, generation, operation):
if generation is not None and generation != self.generation:
msg_args = {
'rp_uuid': self.uuid,
'old': self.generation,
'new': generation,
'op': operation
}
LOG.debug("Updating resource provider %(rp_uuid)s generation "
"from %(old)s to %(new)s during operation: %(op)s",
msg_args)
self.generation = generation
def update_inventory(self, inventory, generation):
"""Update the stored inventory for the provider along with a resource
provider generation to set the provider to. The method returns whether
the inventory has changed.
"""
self._update_generation(generation, 'update_inventory')
if self.has_inventory_changed(inventory):
LOG.debug('Updating inventory in ProviderTree for provider %s '
'with inventory: %s', self.uuid, inventory)
self.inventory = copy.deepcopy(inventory)
return True
LOG.debug('Inventory has not changed in ProviderTree for provider: %s',
self.uuid)
return False
def have_traits_changed(self, new):
"""Returns whether the provider's traits have changed."""
return set(new) != self.traits
def update_traits(self, new, generation=None):
"""Update the stored traits for the provider along with a resource
provider generation to set the provider to. The method returns whether
the traits have changed.
"""
self._update_generation(generation, 'update_traits')
if self.have_traits_changed(new):
self.traits = set(new) # create a copy of the new traits
return True
return False
def has_traits(self, traits):
"""Query whether the provider has certain traits.
:param traits: Iterable of string trait names to look for.
:return: True if this provider has *all* of the specified traits; False
if any of the specified traits are absent. Returns True if
the traits parameter is empty.
"""
return not bool(set(traits) - self.traits)
def have_aggregates_changed(self, new):
"""Returns whether the provider's aggregates have changed."""
return set(new) != self.aggregates
def update_aggregates(self, new, generation=None):
"""Update the stored aggregates for the provider along with a resource
provider generation to set the provider to. The method returns whether
the aggregates have changed.
"""
self._update_generation(generation, 'update_aggregates')
if self.have_aggregates_changed(new):
self.aggregates = set(new) # create a copy of the new aggregates
return True
return False
def in_aggregates(self, aggregates):
"""Query whether the provider is a member of certain aggregates.
:param aggregates: Iterable of string aggregate UUIDs to look for.
:return: True if this provider is a member of *all* of the specified
aggregates; False if any of the specified aggregates are
absent. Returns True if the aggregates parameter is empty.
"""
return not bool(set(aggregates) - self.aggregates)
class ProviderTree(object):
def __init__(self):
"""Create an empty provider tree."""
self.lock = lockutils.internal_lock(_LOCK_NAME)
self.roots = []
def get_provider_uuids(self, name_or_uuid=None):
"""Return a list, in top-down traversable order, of the UUIDs of all
providers (in a (sub)tree).
:param name_or_uuid: Provider name or UUID representing the root of a
(sub)tree for which to return UUIDs. If not
specified, the method returns all UUIDs in the
ProviderTree.
"""
if name_or_uuid is not None:
with self.lock:
return self._find_with_lock(name_or_uuid).get_provider_uuids()
# If no name_or_uuid, get UUIDs for all providers recursively.
ret = []
with self.lock:
for root in self.roots:
ret.extend(root.get_provider_uuids())
return ret
def get_provider_uuids_in_tree(self, name_or_uuid):
"""Returns a list, in top-down traversable order, of the UUIDs of all
providers in the whole tree of which the provider identified by
``name_or_uuid`` is a member.
:param name_or_uuid: Provider name or UUID representing any member of
whole tree for which to return UUIDs.
"""
with self.lock:
return self._find_with_lock(
name_or_uuid, return_root=True).get_provider_uuids()
def populate_from_iterable(self, provider_dicts):
"""Populates this ProviderTree from an iterable of provider dicts.
This method will ADD providers to the tree if provider_dicts contains
providers that do not exist in the tree already and will REPLACE
providers in the tree if provider_dicts contains providers that are
already in the tree. This method will NOT remove providers from the
tree that are not in provider_dicts. But if a parent provider is in
provider_dicts and the descendants are not, this method will remove the
descendants from the tree.
:param provider_dicts: An iterable of dicts of resource provider
information. If a provider is present in
provider_dicts, all its descendants must also be
present.
:raises: ValueError if any provider in provider_dicts has a parent that
is not in this ProviderTree or elsewhere in provider_dicts.
"""
if not provider_dicts:
return
# Map of provider UUID to provider dict for the providers we're
# *adding* via this method.
to_add_by_uuid = {pd['uuid']: pd for pd in provider_dicts}
with self.lock:
# Sanity check for orphans. Every parent UUID must either be None
# (the provider is a root), or be in the tree already, or exist as
# a key in to_add_by_uuid (we're adding it).
all_parents = set([None]) | set(to_add_by_uuid)
# NOTE(efried): Can't use get_provider_uuids directly because we're
# already under lock.
for root in self.roots:
all_parents |= set(root.get_provider_uuids())
missing_parents = set()
for pd in to_add_by_uuid.values():
parent_uuid = pd.get('parent_provider_uuid')
if parent_uuid not in all_parents:
missing_parents.add(parent_uuid)
if missing_parents:
raise ValueError(
_("The following parents were not found: %s") %
', '.join(missing_parents))
# Ready to do the work.
# Use to_add_by_uuid to keep track of which providers are left to
# be added.
while to_add_by_uuid:
# Find a provider that's suitable to inject.
for uuid, pd in to_add_by_uuid.items():
# Roots are always okay to inject (None won't be a key in
# to_add_by_uuid). Otherwise, we have to make sure we
# already added the parent (and, by recursion, all
# ancestors) if present in the input.
parent_uuid = pd.get('parent_provider_uuid')
if parent_uuid not in to_add_by_uuid:
break
else:
# This should never happen - we already ensured all parents
# exist in the tree, which means we can't have any branches
# that don't wind up at the root, which means we can't have
# cycles. But to quell the paranoia...
raise ValueError(
_("Unexpectedly failed to find parents already in the "
"tree for any of the following: %s") %
','.join(set(to_add_by_uuid)))
# Add or replace the provider, either as a root or under its
# parent
try:
self._remove_with_lock(uuid)
except ValueError:
# Wasn't there in the first place - fine.
pass
provider = _Provider.from_dict(pd)
if parent_uuid is None:
self.roots.append(provider)
else:
parent = self._find_with_lock(parent_uuid)
parent.add_child(provider)
# Remove this entry to signify we're done with it.
to_add_by_uuid.pop(uuid)
def _remove_with_lock(self, name_or_uuid):
found = self._find_with_lock(name_or_uuid)
if found.parent_uuid:
parent = self._find_with_lock(found.parent_uuid)
parent.remove_child(found)
else:
self.roots.remove(found)
def remove(self, name_or_uuid):
"""Safely removes the provider identified by the supplied name_or_uuid
parameter and all of its children from the tree.
:raises ValueError if name_or_uuid points to a non-existing provider.
:param name_or_uuid: Either name or UUID of the resource provider to
remove from the tree.
"""
with self.lock:
self._remove_with_lock(name_or_uuid)
def new_root(self, name, uuid, generation=None):
"""Adds a new root provider to the tree, returning its UUID.
:param name: The name of the new root provider
:param uuid: The UUID of the new root provider
:param generation: Generation to set for the new root provider
:returns: the UUID of the new provider
:raises: ValueError if a provider with the specified uuid already
exists in the tree.
"""
with self.lock:
exists = True
try:
self._find_with_lock(uuid)
except ValueError:
exists = False
if exists:
err = _("Provider %s already exists.")
raise ValueError(err % uuid)
p = _Provider(name, uuid=uuid, generation=generation)
self.roots.append(p)
return p.uuid
def _find_with_lock(self, name_or_uuid, return_root=False):
for root in self.roots:
found = root.find(name_or_uuid)
if found:
return root if return_root else found
raise ValueError(_("No such provider %s") % name_or_uuid)
def data(self, name_or_uuid):
"""Return a point-in-time copy of the specified provider's data.
:param name_or_uuid: Either name or UUID of the resource provider whose
data is to be returned.
:return: ProviderData object representing the specified provider.
:raises: ValueError if a provider with name_or_uuid was not found in
the tree.
"""
with self.lock:
return self._find_with_lock(name_or_uuid).data()
def exists(self, name_or_uuid):
"""Given either a name or a UUID, return True if the tree contains the
provider, False otherwise.
"""
with self.lock:
try:
self._find_with_lock(name_or_uuid)
return True
except ValueError:
return False
def new_child(self, name, parent, uuid=None, generation=None):
"""Creates a new child provider with the given name and uuid under the
given parent.
:param name: The name of the new child provider
:param parent: Either name or UUID of the parent provider
:param uuid: The UUID of the new child provider
:param generation: Generation to set for the new child provider
:returns: the UUID of the new provider
:raises ValueError if a provider with the specified uuid or name
already exists; or if parent_uuid points to a nonexistent
provider.
"""
with self.lock:
try:
self._find_with_lock(uuid or name)
except ValueError:
pass
else:
err = _("Provider %s already exists.")
raise ValueError(err % (uuid or name))
parent_node = self._find_with_lock(parent)
p = _Provider(name, uuid, generation, parent_node.uuid)
parent_node.add_child(p)
return p.uuid
def has_inventory(self, name_or_uuid):
"""Returns True if the provider identified by name_or_uuid has any
inventory records at all.
:raises: ValueError if a provider with uuid was not found in the tree.
:param name_or_uuid: Either name or UUID of the resource provider
"""
with self.lock:
p = self._find_with_lock(name_or_uuid)
return p.has_inventory()
def has_inventory_changed(self, name_or_uuid, inventory):
"""Returns True if the supplied inventory is different for the provider
with the supplied name or UUID.
:raises: ValueError if a provider with name_or_uuid was not found in
the tree.
:param name_or_uuid: Either name or UUID of the resource provider to
query inventory for.
:param inventory: dict, keyed by resource class, of inventory
information.
"""
with self.lock:
provider = self._find_with_lock(name_or_uuid)
return provider.has_inventory_changed(inventory)
def update_inventory(self, name_or_uuid, inventory, generation=None):
"""Given a name or UUID of a provider and a dict of inventory resource
records, update the provider's inventory and set the provider's
generation.
:returns: True if the inventory has changed.
:note: The provider's generation is always set to the supplied
generation, even if there were no changes to the inventory.
:raises: ValueError if a provider with name_or_uuid was not found in
the tree.
:param name_or_uuid: Either name or UUID of the resource provider to
update inventory for.
:param inventory: dict, keyed by resource class, of inventory
information.
:param generation: The resource provider generation to set. If not
specified, the provider's generation is not changed.
"""
with self.lock:
provider = self._find_with_lock(name_or_uuid)
return provider.update_inventory(inventory, generation)
def has_sharing_provider(self, resource_class):
"""Returns whether the specified provider_tree contains any sharing
providers of inventory of the specified resource_class.
"""
for rp_uuid in self.get_provider_uuids():
pdata = self.data(rp_uuid)
has_rc = resource_class in pdata.inventory
is_sharing = os_traits.MISC_SHARES_VIA_AGGREGATE in pdata.traits
if has_rc and is_sharing:
return True
return False
def has_traits(self, name_or_uuid, traits):
"""Given a name or UUID of a provider, query whether that provider has
*all* of the specified traits.
:raises: ValueError if a provider with name_or_uuid was not found in
the tree.
:param name_or_uuid: Either name or UUID of the resource provider to
query for traits.
:param traits: Iterable of string trait names to search for.
:return: True if this provider has *all* of the specified traits; False
if any of the specified traits are absent. Returns True if
the traits parameter is empty, even if the provider has no
traits.
"""
with self.lock:
provider = self._find_with_lock(name_or_uuid)
return provider.has_traits(traits)
def have_traits_changed(self, name_or_uuid, traits):
"""Returns True if the specified traits list is different for the
provider with the specified name or UUID.
:raises: ValueError if a provider with name_or_uuid was not found in
the tree.
:param name_or_uuid: Either name or UUID of the resource provider to
query traits for.
:param traits: Iterable of string trait names to compare against the
provider's traits.
"""
with self.lock:
provider = self._find_with_lock(name_or_uuid)
return provider.have_traits_changed(traits)
def update_traits(self, name_or_uuid, traits, generation=None):
"""Given a name or UUID of a provider and an iterable of string trait
names, update the provider's traits and set the provider's generation.
:returns: True if the traits list has changed.
:note: The provider's generation is always set to the supplied
generation, even if there were no changes to the traits.
:raises: ValueError if a provider with name_or_uuid was not found in
the tree.
:param name_or_uuid: Either name or UUID of the resource provider to
update traits for.
:param traits: Iterable of string trait names to set.
:param generation: The resource provider generation to set. If None,
the provider's generation is not changed.
"""
with self.lock:
provider = self._find_with_lock(name_or_uuid)
return provider.update_traits(traits, generation=generation)
def add_traits(self, name_or_uuid, *traits):
"""Set traits on a provider, without affecting existing traits.
:param name_or_uuid: The name or UUID of the provider whose traits are
to be affected.
:param traits: String names of traits to be added.
"""
if not traits:
return
with self.lock:
provider = self._find_with_lock(name_or_uuid)
final_traits = provider.traits | set(traits)
provider.update_traits(final_traits)
def remove_traits(self, name_or_uuid, *traits):
"""Unset traits on a provider, without affecting other existing traits.
:param name_or_uuid: The name or UUID of the provider whose traits are
to be affected.
:param traits: String names of traits to be removed.
"""
if not traits:
return
with self.lock:
provider = self._find_with_lock(name_or_uuid)
final_traits = provider.traits - set(traits)
provider.update_traits(final_traits)
def in_aggregates(self, name_or_uuid, aggregates):
"""Given a name or UUID of a provider, query whether that provider is a
member of *all* the specified aggregates.
:raises: ValueError if a provider with name_or_uuid was not found in
the tree.
:param name_or_uuid: Either name or UUID of the resource provider to
query for aggregates.
:param aggregates: Iterable of string aggregate UUIDs to search for.
:return: True if this provider is associated with *all* of the
specified aggregates; False if any of the specified aggregates
are absent. Returns True if the aggregates parameter is
empty, even if the provider has no aggregate associations.
"""
with self.lock:
provider = self._find_with_lock(name_or_uuid)
return provider.in_aggregates(aggregates)
def have_aggregates_changed(self, name_or_uuid, aggregates):
"""Returns True if the specified aggregates list is different for the
provider with the specified name or UUID.
:raises: ValueError if a provider with name_or_uuid was not found in
the tree.
:param name_or_uuid: Either name or UUID of the resource provider to
query aggregates for.
:param aggregates: Iterable of string aggregate UUIDs to compare
against the provider's aggregates.
"""
with self.lock:
provider = self._find_with_lock(name_or_uuid)
return provider.have_aggregates_changed(aggregates)
def update_aggregates(self, name_or_uuid, aggregates, generation=None):
"""Given a name or UUID of a provider and an iterable of string
aggregate UUIDs, update the provider's aggregates and set the
provider's generation.
:returns: True if the aggregates list has changed.
:note: The provider's generation is always set to the supplied
generation, even if there were no changes to the aggregates.
:raises: ValueError if a provider with name_or_uuid was not found in
the tree.
:param name_or_uuid: Either name or UUID of the resource provider to
update aggregates for.
:param aggregates: Iterable of string aggregate UUIDs to set.
:param generation: The resource provider generation to set. If None,
the provider's generation is not changed.
"""
with self.lock:
provider = self._find_with_lock(name_or_uuid)
return provider.update_aggregates(aggregates,
generation=generation)
def add_aggregates(self, name_or_uuid, *aggregates):
"""Set aggregates on a provider, without affecting existing aggregates.
:param name_or_uuid: The name or UUID of the provider whose aggregates
are to be affected.
:param aggregates: String UUIDs of aggregates to be added.
"""
with self.lock:
provider = self._find_with_lock(name_or_uuid)
final_aggs = provider.aggregates | set(aggregates)
provider.update_aggregates(final_aggs)
def remove_aggregates(self, name_or_uuid, *aggregates):
"""Unset aggregates on a provider, without affecting other existing
aggregates.
:param name_or_uuid: The name or UUID of the provider whose aggregates
are to be affected.
:param aggregates: String UUIDs of aggregates to be removed.
"""
with self.lock:
provider = self._find_with_lock(name_or_uuid)
final_aggs = provider.aggregates - set(aggregates)
provider.update_aggregates(final_aggs)
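A short usage sketch of the public ProviderTree interface defined above; the provider names and the CUSTOM_EXAMPLE trait are illustrative:

    from oslo_utils import uuidutils

    from zun.compute import provider_tree

    pt = provider_tree.ProviderTree()
    cn_uuid = pt.new_root('compute-node-1', uuidutils.generate_uuid(),
                          generation=0)
    numa0 = pt.new_child('numa0', cn_uuid)

    pt.update_inventory(cn_uuid, {'VCPU': {'total': 8}}, generation=1)
    pt.add_traits(numa0, 'CUSTOM_EXAMPLE')

    assert pt.has_inventory('compute-node-1')   # lookup by name or UUID
    assert pt.has_traits(numa0, ['CUSTOM_EXAMPLE'])

    # data() returns a read-only point-in-time snapshot; mutating it does
    # not touch the tree (the "Polaroid" note above).
    snap = pt.data(numa0)
    assert snap.parent_uuid == cn_uuid

    pt.remove(cn_uuid)                          # drops the whole subtree
    assert not pt.exists(numa0)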


@@ -29,6 +29,7 @@ from zun.conf import network
from zun.conf import neutron_client
from zun.conf import path
from zun.conf import pci
from zun.conf import placement_client
from zun.conf import profiler
from zun.conf import quota
from zun.conf import scheduler
@@ -65,3 +66,4 @@ cinder_client.register_opts(CONF)
netconf.register_opts(CONF)
availability_zone.register_opts(CONF)
utils.register_opts(CONF)
placement_client.register_opts(CONF)


@@ -29,6 +29,25 @@ compute_opts = [
'enable_cpu_pinning',
default=False,
help='allow containers whose cpu_policy is dedicated'),
cfg.IntOpt(
'resource_provider_association_refresh',
default=300,
min=0,
mutable=True,
# TODO(efried): Provide more/better explanation of what this option is
# all about. Reference bug(s). Unless we're just going to remove it.
help="""
Interval for updating zun-compute-side cache of the compute node resource
provider's inventories, aggregates, and traits.
This option specifies the number of seconds between attempts to update a
provider's inventories, aggregates and traits in the local cache of the compute
node.
A value of zero disables cache refresh completely.
The cache can be cleared manually at any time by sending SIGHUP to the compute
process, causing it to be repopulated the next time the data is accessed.
Possible values:
* Any positive integer in seconds, or zero to disable refresh.
"""),
]
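A sketch of how a compute-side cache might honor this option; the helper is hypothetical (not part of this change) and assumes the option is registered under the [compute] group like the surrounding opts:

    import time

    import zun.conf

    CONF = zun.conf.CONF

    def _associations_stale(last_refreshed):
        # Hypothetical helper: zero disables periodic refresh entirely.
        refresh = CONF.compute.resource_provider_association_refresh
        return bool(refresh) and (time.time() - last_refreshed) > refresh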
service_opts = [


@@ -0,0 +1,53 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
placement_group = cfg.OptGroup(
name='placement_client',
title='Placement Service Options',
help="Configuration options for connecting to the placement API service")
common_security_opts = [
cfg.StrOpt('ca_file',
help='Optional CA cert file to use in SSL connections.'),
cfg.StrOpt('cert_file',
help='Optional PEM-formatted certificate chain file.'),
cfg.StrOpt('key_file',
help='Optional PEM-formatted file that contains the '
'private key.'),
cfg.BoolOpt('insecure',
default=False,
help="If set, then the server's certificate will not "
"be verified.")]
placement_client_opts = [
cfg.StrOpt('region_name',
help='Region in Identity service catalog to use for '
'communication with the OpenStack service.'),
cfg.StrOpt('endpoint_type',
default='publicURL',
help='Type of endpoint in Identity service catalog to use '
'for communication with the OpenStack service.')]
ALL_OPTS = (placement_client_opts + common_security_opts)
def register_opts(conf):
conf.register_group(placement_group)
conf.register_opts(ALL_OPTS, group=placement_group)
def list_opts():
return {placement_group: ALL_OPTS}
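These options back the placement() accessor added to OpenStackClients earlier in this change: _get_client_option('placement', ...) resolves to the [placement_client] group registered here. A reading sketch:

    import zun.conf

    CONF = zun.conf.CONF

    region = CONF.placement_client.region_name
    endpoint_type = CONF.placement_client.endpoint_type  # 'publicURL' default
    insecure = CONF.placement_client.insecure            # False by default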


@@ -92,6 +92,25 @@ Related options:
* All of the filters in this option *must* be present in the
'scheduler_available_filters' option, or a SchedulerHostFilterNotFound
exception will be raised.
"""),
cfg.IntOpt("max_placement_results",
default=1000,
min=1,
help="""
This setting determines the maximum number of results received from the
placement service during a scheduling operation. It effectively limits
the number of hosts that may be considered for scheduling requests that
match a large number of candidates.
A value of 1 (the minimum) will effectively defer scheduling to the placement
service strictly on "will it fit" grounds. A higher value will put an upper
cap on the number of results the scheduler will consider during the filtering
and weighing process. Large deployments may need to set this lower than the
total number of hosts available to limit memory consumption, network traffic,
etc. of the scheduler.
This option is only used by the FilterScheduler; if you use a different
scheduler, this option has no effect.
"""),
]


@@ -23,6 +23,7 @@ from zun.objects import pci_device_pool
from zun.objects import quota
from zun.objects import quota_class
from zun.objects import registry
from zun.objects import request_group
from zun.objects import resource_class
from zun.objects import resource_provider
from zun.objects import volume
@@ -54,6 +55,7 @@ ContainerAction = container_action.ContainerAction
ContainerActionEvent = container_action.ContainerActionEvent
ExecInstance = exec_instance.ExecInstance
Registry = registry.Registry
RequestGroup = request_group.RequestGroup
__all__ = (
'Container',
@@ -79,4 +81,5 @@ __all__ = (
'ContainerActionEvent',
'ExecInstance',
'Registry',
'RequestGroup',
)


@@ -83,6 +83,14 @@ class JsonField(fields.AutoTypedField):
AUTO_TYPE = Json()
class SetOfStringsField(fields.AutoTypedField):
AUTO_TYPE = fields.Set(fields.String())
class ListOfListsOfStringsField(fields.AutoTypedField):
AUTO_TYPE = fields.List(fields.List(fields.String()))
class ResourceClass(fields.Enum):
ALL = consts.RESOURCE_CLASSES
@@ -119,8 +127,8 @@ class PciDeviceType(BaseZunEnum):
class PciDeviceTypeField(fields.BaseEnumField):
AUTO_TYPE = PciDeviceType()
AUTO_TYPE = PciDeviceType()
class PciDeviceStatusField(fields.BaseEnumField):
AUTO_TYPE = PciDeviceStatus()
AUTO_TYPE = PciDeviceStatus()


@@ -0,0 +1,48 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fields
from zun.objects import base
from zun.objects import fields as zun_fields
@base.ZunObjectRegistry.register
class RequestGroup(base.ZunPersistentObject, base.ZunObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'use_same_provider': fields.BooleanField(default=True),
'resources': fields.DictOfIntegersField(default={}),
'required_traits': zun_fields.SetOfStringsField(default=set()),
'forbidden_traits': zun_fields.SetOfStringsField(default=set()),
# The aggregates field has a form of
# [[aggregate_UUID1],
# [aggregate_UUID2, aggregate_UUID3]]
# meaning that the request should be fulfilled from an RP that is a
# member of the aggregate aggregate_UUID1 and member of the aggregate
# aggregate_UUID2 or aggregate_UUID3.
'aggregates': zun_fields.ListOfListsOfStringsField(default=[]),
# The entity the request is coming from (e.g. the Neutron port uuid)
# which may not always be a UUID.
'requester_id': fields.StringField(nullable=True, default=None),
# The resource provider UUIDs that together fulfill the request
# NOTE(gibi): this can be more than one if this is the unnumbered
# request group (i.e. use_same_provider=False)
'provider_uuids': fields.ListOfUUIDField(default=[]),
'in_tree': fields.UUIDField(nullable=True, default=None),
}
def __init__(self, context=None, **kwargs):
super(RequestGroup, self).__init__(context=context, **kwargs)
self.obj_set_defaults()
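Because __init__ calls obj_set_defaults(), a freshly created group is immediately usable without setting every field; a sketch (CUSTOM_EXAMPLE is an illustrative trait name):

    from zun import objects

    # Unnumbered group: resources may be spread across providers.
    group = objects.RequestGroup(use_same_provider=False)
    assert group.resources == {}              # defaults applied by __init__
    assert group.required_traits == set()

    group.resources['VCPU'] = 2
    group.required_traits.add('CUSTOM_EXAMPLE')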


File diff suppressed because it is too large.

zun/scheduler/utils.py (new file, 282 lines)

@@ -0,0 +1,282 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for scheduling."""
import collections
import re
import os_resource_classes as orc
from oslo_log import log as logging
from six.moves.urllib import parse
import zun.conf
from zun import objects
LOG = logging.getLogger(__name__)
CONF = zun.conf.CONF
class ResourceRequest(object):
"""Presents a granular resource request via RequestGroup instances."""
# extra_specs-specific consts
XS_RES_PREFIX = 'resources'
XS_TRAIT_PREFIX = 'trait'
# Regex patterns for numbered or un-numbered resources/trait keys
XS_KEYPAT = re.compile(r"^(%s)([1-9][0-9]*)?:(.*)$" %
'|'.join((XS_RES_PREFIX, XS_TRAIT_PREFIX)))
def __init__(self):
# { ident: RequestGroup }
self._rg_by_id = {}
self._group_policy = None
# Default to the configured limit but _limit can be
# set to None to indicate "no limit".
self._limit = CONF.scheduler.max_placement_results
def __str__(self):
return ', '.join(sorted(
list(str(rg) for rg in list(self._rg_by_id.values()))))
@property
def group_policy(self):
return self._group_policy
@group_policy.setter
def group_policy(self, value):
self._group_policy = value
def get_request_group(self, ident):
if ident not in self._rg_by_id:
rq_grp = objects.RequestGroup(use_same_provider=bool(ident))
self._rg_by_id[ident] = rq_grp
return self._rg_by_id[ident]
def add_request_group(self, request_group):
"""Inserts the existing group with a unique integer id
This function can ensure unique ids by using bigger
ids than the maximum of existing ids.
:param request_group: the RequestGroup to be added
"""
# NOTE(gibi) [0] just here to always have a defined maximum
group_idents = [0] + [int(ident) for ident in self._rg_by_id if ident]
ident = max(group_idents) + 1
self._rg_by_id[ident] = request_group
def _add_resource(self, groupid, rclass, amount):
# Validate the class.
if not (rclass.startswith(orc.CUSTOM_NAMESPACE) or
rclass in orc.STANDARDS):
LOG.warning(
"Received an invalid ResourceClass '%(key)s' in extra_specs.",
{"key": rclass})
return
# The amount must be a nonnegative integer. Convert, or warn and skip.
try:
amount = int(amount)
if amount < 0:
raise ValueError()
except ValueError:
LOG.warning(
"Resource amounts must be nonnegative integers. Received "
"'%(val)s' for key resources%(groupid)s.",
{"groupid": groupid or '', "val": amount})
return
self.get_request_group(groupid).resources[rclass] = amount
def _add_trait(self, groupid, trait_name, trait_type):
# Currently the only valid values for a trait entry are 'required'
# and 'forbidden'
trait_vals = ('required', 'forbidden')
if trait_type == 'required':
self.get_request_group(groupid).required_traits.add(trait_name)
elif trait_type == 'forbidden':
self.get_request_group(groupid).forbidden_traits.add(trait_name)
else:
LOG.warning(
"Only (%(tvals)s) traits are supported. Received '%(val)s' "
"for key trait%(groupid)s.",
{"tvals": ', '.join(trait_vals), "groupid": groupid or '',
"val": trait_type})
return
def _add_group_policy(self, policy):
# The only valid values for group_policy are 'none' and 'isolate'.
if policy not in ('none', 'isolate'):
LOG.warning(
"Invalid group_policy '%s'. Valid values are 'none' and "
"'isolate'.", policy)
return
self._group_policy = policy
@classmethod
def from_extra_specs(cls, extra_specs, req=None):
"""Processes resources and traits in numbered groupings in extra_specs.
Examines extra_specs for items of the following forms:
"resources:$RESOURCE_CLASS": $AMOUNT
"resources$N:$RESOURCE_CLASS": $AMOUNT
"trait:$TRAIT_NAME": "required"
"trait$N:$TRAIT_NAME": "required"
Does *not* yet handle member_of[$N].
:param extra_specs: The extra_specs dict.
:param req: the ResourceRequest object to add the requirements to or
None to create a new ResourceRequest
:return: A ResourceRequest object representing the resources and
required traits in the extra_specs.
"""
# TODO(efried): Handle member_of[$N], which will need to be reconciled
# with destination.aggregates handling in resources_from_request_spec
if req is not None:
ret = req
else:
ret = cls()
for key, val in extra_specs.items():
if key == 'group_policy':
ret._add_group_policy(val)
continue
match = cls.XS_KEYPAT.match(key)
if not match:
continue
# 'prefix' is 'resources' or 'trait'
# 'suffix' is $N or None
# 'name' is either the resource class name or the trait name.
prefix, suffix, name = match.groups()
# Process "resources[$N]"
if prefix == cls.XS_RES_PREFIX:
ret._add_resource(suffix, name, val)
# Process "trait[$N]"
elif prefix == cls.XS_TRAIT_PREFIX:
ret._add_trait(suffix, name, val)
return ret
def resource_groups(self):
for rg in self._rg_by_id.values():
yield rg.resources
def get_num_of_numbered_groups(self):
return len([ident for ident in self._rg_by_id.keys()
if ident is not None])
def merged_resources(self, resources=None):
"""Returns a merge of {resource_class: amount} for all resource groups.
Amounts of the same resource class from different groups are added
together.
:param resources: A flat dict of {resource_class: amount}. If
specified, the resources therein are folded
into the return dict, such that any resource
in resources is included only if that
resource class does not exist elsewhere in the
merged ResourceRequest.
:return: A dict of the form {resource_class: amount}
"""
ret = collections.defaultdict(lambda: 0)
for resource_dict in self.resource_groups():
for resource_class, amount in resource_dict.items():
ret[resource_class] += amount
if resources:
for resource_class, amount in resources.items():
# If it's in there - even if zero - ignore the one from the
# flavor.
if resource_class not in ret:
ret[resource_class] = amount
# Now strip zeros. This has to be done after the above - we can't
# use strip_zeros :(
ret = {rc: amt for rc, amt in ret.items() if amt}
return dict(ret)
def _clean_empties(self):
"""Get rid of any empty ResourceGroup instances."""
for ident, rg in list(self._rg_by_id.items()):
if not any((rg.resources, rg.required_traits,
rg.forbidden_traits)):
self._rg_by_id.pop(ident)
def strip_zeros(self):
"""Remove any resources whose amounts are zero."""
for resource_dict in self.resource_groups():
for rclass in list(resource_dict):
if resource_dict[rclass] == 0:
resource_dict.pop(rclass)
self._clean_empties()
def to_querystring(self):
"""Produce a querystring of the form expected by
GET /allocation_candidates.
"""
# TODO(gibi): We have a RequestGroup OVO so we can move this to that
# class as a member function.
# NOTE(efried): The sorting herein is not necessary for the API; it is
# to make testing easier and logging/debugging predictable.
def to_queryparams(request_group, suffix):
res = request_group.resources
required_traits = request_group.required_traits
forbidden_traits = request_group.forbidden_traits
aggregates = request_group.aggregates
in_tree = request_group.in_tree
resource_query = ",".join(
sorted("%s:%s" % (rc, amount)
for (rc, amount) in res.items()))
qs_params = [('resources%s' % suffix, resource_query)]
# Assemble required and forbidden traits, allowing for either/both
# to be empty.
required_val = ','.join(
sorted(required_traits) +
['!%s' % ft for ft in sorted(forbidden_traits)])
if required_val:
qs_params.append(('required%s' % suffix, required_val))
if aggregates:
aggs = []
# member_ofN is a list of lists. We need a tuple of
# ('member_ofN', 'in:uuid,uuid,...') for each inner list.
for agglist in aggregates:
aggs.append(('member_of%s' % suffix,
'in:' + ','.join(sorted(agglist))))
qs_params.extend(sorted(aggs))
if in_tree:
qs_params.append(('in_tree%s' % suffix, in_tree))
return qs_params
if self._limit is not None:
qparams = [('limit', self._limit)]
else:
qparams = []
if self._group_policy is not None:
qparams.append(('group_policy', self._group_policy))
for ident, rg in self._rg_by_id.items():
# [('resourcesN', 'rclass:amount,rclass:amount,...'),
# ('requiredN', 'trait_name,!trait_name,...'),
# ('member_ofN', 'in:uuid,uuid,...'),
# ('member_ofN', 'in:uuid,uuid,...')]
qparams.extend(to_queryparams(rg, ident or ''))
return parse.urlencode(sorted(qparams))
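Tying it together, a sketch of the extra_specs round trip in a configured Zun environment (values illustrative; assumes [scheduler] max_placement_results is at its default of 1000):

    from zun.scheduler import utils as scheduler_utils

    req = scheduler_utils.ResourceRequest.from_extra_specs({
        'resources:VCPU': '2',
        'resources:MEMORY_MB': '512',
        'trait:CUSTOM_EXAMPLE': 'required',   # illustrative trait
        'resources1:DISK_GB': '10',           # numbered (granular) group
    })
    print(req.to_querystring())
    # limit=1000&required=CUSTOM_EXAMPLE
    #   &resources=MEMORY_MB%3A512%2CVCPU%3A2&resources1=DISK_GB%3A10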


@@ -0,0 +1,692 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils.fixture import uuidsentinel as uuids
from zun.common import context
from zun.compute import provider_tree
from zun.tests import base
from zun.tests.unit.objects import utils
class TestProviderTree(base.TestCase):
def setUp(self):
super(TestProviderTree, self).setUp()
self.context = context.get_admin_context()
self.compute_node1 = utils.get_test_compute_node(
self.context, uuid=uuids.cn1, hostname='compute-node-1')
self.compute_node2 = utils.get_test_compute_node(
self.context, uuid=uuids.cn2, hostname='compute-node-2')
self.compute_nodes = [self.compute_node1, self.compute_node2]
def _pt_with_cns(self):
pt = provider_tree.ProviderTree()
for cn in self.compute_nodes:
pt.new_root(cn.hostname, cn.uuid, generation=0)
return pt
def test_tree_ops(self):
cn1 = self.compute_node1
cn2 = self.compute_node2
pt = self._pt_with_cns()
self.assertRaises(
ValueError,
pt.new_root,
cn1.hostname,
cn1.uuid,
)
self.assertTrue(pt.exists(cn1.uuid))
self.assertTrue(pt.exists(cn1.hostname))
self.assertFalse(pt.exists(uuids.non_existing_rp))
self.assertFalse(pt.exists('noexist'))
self.assertEqual([cn1.uuid],
pt.get_provider_uuids(name_or_uuid=cn1.uuid))
# Same with ..._in_tree
self.assertEqual([cn1.uuid], pt.get_provider_uuids_in_tree(cn1.uuid))
self.assertEqual(set([cn1.uuid, cn2.uuid]),
set(pt.get_provider_uuids()))
numa_cell0_uuid = pt.new_child('numa_cell0', cn1.uuid)
numa_cell1_uuid = pt.new_child('numa_cell1', cn1.hostname)
self.assertEqual(cn1.uuid, pt.data(numa_cell1_uuid).parent_uuid)
self.assertTrue(pt.exists(numa_cell0_uuid))
self.assertTrue(pt.exists('numa_cell0'))
self.assertTrue(pt.exists(numa_cell1_uuid))
self.assertTrue(pt.exists('numa_cell1'))
pf1_cell0_uuid = pt.new_child('pf1_cell0', numa_cell0_uuid)
self.assertTrue(pt.exists(pf1_cell0_uuid))
self.assertTrue(pt.exists('pf1_cell0'))
# Now we've got a 3-level tree under cn1 - check provider UUIDs again
all_cn1 = [cn1.uuid, numa_cell0_uuid, pf1_cell0_uuid, numa_cell1_uuid]
self.assertEqual(
set(all_cn1),
set(pt.get_provider_uuids(name_or_uuid=cn1.uuid)))
# Same with ..._in_tree if we're asking for the root
self.assertEqual(
set(all_cn1),
set(pt.get_provider_uuids_in_tree(cn1.uuid)))
# Asking for a subtree.
self.assertEqual(
[numa_cell0_uuid, pf1_cell0_uuid],
pt.get_provider_uuids(name_or_uuid=numa_cell0_uuid))
# With ..._in_tree, get the whole tree no matter which we specify.
for node in all_cn1:
self.assertEqual(set(all_cn1), set(pt.get_provider_uuids_in_tree(
node)))
# With no provider specified, get everything
self.assertEqual(
set([cn1.uuid, cn2.uuid, numa_cell0_uuid, pf1_cell0_uuid,
numa_cell1_uuid]),
set(pt.get_provider_uuids()))
self.assertRaises(
ValueError,
pt.new_child,
'pf1_cell0',
uuids.non_existing_rp,
)
# Fail attempting to add a child that already exists in the tree
# Existing provider is a child; search by name
self.assertRaises(ValueError, pt.new_child, 'numa_cell0', cn1.uuid)
# Existing provider is a root; search by UUID
self.assertRaises(ValueError, pt.new_child, cn1.uuid, cn2.uuid)
# Test data().
# Root, by UUID
cn1_snap = pt.data(cn1.uuid)
# Fields were faithfully copied
self.assertEqual(cn1.uuid, cn1_snap.uuid)
self.assertEqual(cn1.hostname, cn1_snap.name)
self.assertIsNone(cn1_snap.parent_uuid)
self.assertEqual({}, cn1_snap.inventory)
self.assertEqual(set(), cn1_snap.traits)
self.assertEqual(set(), cn1_snap.aggregates)
# Validate read-only-ness
self.assertRaises(AttributeError, setattr, cn1_snap, 'name', 'foo')
cn3 = utils.get_test_compute_node(
self.context, uuid=uuids.cn3, hostname='compute-node-3')
self.assertFalse(pt.exists(cn3.uuid))
self.assertFalse(pt.exists(cn3.hostname))
pt.new_root(cn3.hostname, cn3.uuid)
self.assertTrue(pt.exists(cn3.uuid))
self.assertTrue(pt.exists(cn3.hostname))
self.assertRaises(
ValueError,
pt.new_root,
cn3.hostname,
cn3.uuid,
)
self.assertRaises(
ValueError,
pt.remove,
uuids.non_existing_rp,
)
pt.remove(numa_cell1_uuid)
self.assertFalse(pt.exists(numa_cell1_uuid))
self.assertTrue(pt.exists(pf1_cell0_uuid))
self.assertTrue(pt.exists(numa_cell0_uuid))
self.assertTrue(pt.exists(uuids.cn1))
# Now remove the root and check that children no longer exist
pt.remove(uuids.cn1)
self.assertFalse(pt.exists(pf1_cell0_uuid))
self.assertFalse(pt.exists(numa_cell0_uuid))
self.assertFalse(pt.exists(uuids.cn1))
def test_populate_from_iterable_empty(self):
pt = provider_tree.ProviderTree()
# Empty list is a no-op
pt.populate_from_iterable([])
self.assertEqual([], pt.get_provider_uuids())
def test_populate_from_iterable_error_orphan_cycle(self):
pt = provider_tree.ProviderTree()
# Error trying to populate with an orphan
grandchild1_1 = {
'uuid': uuids.grandchild1_1,
'name': 'grandchild1_1',
'generation': 11,
'parent_provider_uuid': uuids.child1,
}
self.assertRaises(ValueError,
pt.populate_from_iterable, [grandchild1_1])
# Create a cycle so there are no orphans, but no path to a root
cycle = {
'uuid': uuids.child1,
'name': 'child1',
'generation': 1,
# There's a country song about this
'parent_provider_uuid': uuids.grandchild1_1,
}
self.assertRaises(ValueError,
pt.populate_from_iterable, [grandchild1_1, cycle])
def test_populate_from_iterable_complex(self):
# root
# +-> child1
# | +-> grandchild1_2
# | +-> ggc1_2_1
# | +-> ggc1_2_2
# | +-> ggc1_2_3
# +-> child2
# another_root
pt = provider_tree.ProviderTree()
plist = [
{
'uuid': uuids.root,
'name': 'root',
'generation': 0,
},
{
'uuid': uuids.child1,
'name': 'child1',
'generation': 1,
'parent_provider_uuid': uuids.root,
},
{
'uuid': uuids.child2,
'name': 'child2',
'generation': 2,
'parent_provider_uuid': uuids.root,
},
{
'uuid': uuids.grandchild1_2,
'name': 'grandchild1_2',
'generation': 12,
'parent_provider_uuid': uuids.child1,
},
{
'uuid': uuids.ggc1_2_1,
'name': 'ggc1_2_1',
'generation': 121,
'parent_provider_uuid': uuids.grandchild1_2,
},
{
'uuid': uuids.ggc1_2_2,
'name': 'ggc1_2_2',
'generation': 122,
'parent_provider_uuid': uuids.grandchild1_2,
},
{
'uuid': uuids.ggc1_2_3,
'name': 'ggc1_2_3',
'generation': 123,
'parent_provider_uuid': uuids.grandchild1_2,
},
{
'uuid': uuids.another_root,
'name': 'another_root',
'generation': 911,
},
]
pt.populate_from_iterable(plist)
def validate_root(expected_uuids):
# Make sure we have all and only the expected providers
self.assertEqual(expected_uuids, set(pt.get_provider_uuids()))
# Now make sure they're in the right hierarchy. Cheat: get the
# actual _Provider to make it easier to walk the tree (ProviderData
# doesn't include children).
root = pt._find_with_lock(uuids.root)
self.assertEqual(uuids.root, root.uuid)
self.assertEqual('root', root.name)
self.assertEqual(0, root.generation)
self.assertIsNone(root.parent_uuid)
self.assertEqual(2, len(list(root.children)))
for child in root.children.values():
self.assertTrue(child.name.startswith('child'))
if child.name == 'child1':
if uuids.grandchild1_1 in expected_uuids:
self.assertEqual(2, len(list(child.children)))
else:
self.assertEqual(1, len(list(child.children)))
for grandchild in child.children.values():
self.assertTrue(grandchild.name.startswith(
'grandchild1_'))
if grandchild.name == 'grandchild1_1':
self.assertEqual(0, len(list(grandchild.children)))
if grandchild.name == 'grandchild1_2':
self.assertEqual(3, len(list(grandchild.children)))
for ggc in grandchild.children.values():
self.assertTrue(ggc.name.startswith('ggc1_2_'))
another_root = pt._find_with_lock(uuids.another_root)
self.assertEqual(uuids.another_root, another_root.uuid)
self.assertEqual('another_root', another_root.name)
self.assertEqual(911, another_root.generation)
self.assertIsNone(another_root.parent_uuid)
self.assertEqual(0, len(list(another_root.children)))
if uuids.new_root in expected_uuids:
new_root = pt._find_with_lock(uuids.new_root)
self.assertEqual(uuids.new_root, new_root.uuid)
self.assertEqual('new_root', new_root.name)
self.assertEqual(42, new_root.generation)
self.assertIsNone(new_root.parent_uuid)
self.assertEqual(0, len(list(new_root.children)))
expected_uuids = set([
uuids.root, uuids.child1, uuids.child2, uuids.grandchild1_2,
uuids.ggc1_2_1, uuids.ggc1_2_2, uuids.ggc1_2_3,
uuids.another_root])
validate_root(expected_uuids)
# Merge an orphan - still an error
orphan = {
'uuid': uuids.orphan,
'name': 'orphan',
'generation': 86,
'parent_provider_uuid': uuids.mystery,
}
self.assertRaises(ValueError, pt.populate_from_iterable, [orphan])
# And the tree didn't change
validate_root(expected_uuids)
# Merge a list with a new grandchild and a new root
plist = [
{
'uuid': uuids.grandchild1_1,
'name': 'grandchild1_1',
'generation': 11,
'parent_provider_uuid': uuids.child1,
},
{
'uuid': uuids.new_root,
'name': 'new_root',
'generation': 42,
},
]
pt.populate_from_iterable(plist)
expected_uuids |= set([uuids.grandchild1_1, uuids.new_root])
validate_root(expected_uuids)
# Merge an empty list - still a no-op
pt.populate_from_iterable([])
validate_root(expected_uuids)
# Since we have a complex tree, test the ordering of get_provider_uuids
# We can't predict the order of siblings, or where nephews will appear
# relative to their uncles, but we can guarantee that any given child
# always comes after its parent (and by extension, its ancestors too).
puuids = pt.get_provider_uuids()
for desc in (uuids.child1, uuids.child2):
self.assertGreater(puuids.index(desc), puuids.index(uuids.root))
for desc in (uuids.grandchild1_1, uuids.grandchild1_2):
self.assertGreater(puuids.index(desc), puuids.index(uuids.child1))
for desc in (uuids.ggc1_2_1, uuids.ggc1_2_2, uuids.ggc1_2_3):
self.assertGreater(
puuids.index(desc), puuids.index(uuids.grandchild1_2))
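
(Editorial aside, not part of the change.) The parent-before-child ordering
pinned down above is what makes a single pass over get_provider_uuids() safe
for flushing a tree out to placement: by the time any provider is visited,
its parent has already been handled. A minimal sketch under that assumption;
create_in_placement is a hypothetical helper, and the snapshot fields are
assumed per the ProviderData tuple used by data():

    def flush_to_placement(tree, create_in_placement):
        # get_provider_uuids() yields ancestors before descendants, so the
        # parent referenced here was created on an earlier iteration.
        for rp_uuid in tree.get_provider_uuids():
            snap = tree.data(rp_uuid)  # snapshot: uuid, name, parent_uuid...
            create_in_placement(uuid=snap.uuid, name=snap.name,
                                parent=snap.parent_uuid)  # None for roots
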
def test_populate_from_iterable_with_root_update(self):
# Ensure we can update hierarchies, including adding children, in a
# tree that's already populated. This tests the case where a given
# provider exists both in the tree and in the input. We must replace
# that provider *before* we inject its descendants; otherwise the
# descendants will be lost. Note that this test case is not 100%
# deterministic, since we can't predict the order in which hashed values
# are iterated.
pt = provider_tree.ProviderTree()
# Let's create a root
plist = [
{
'uuid': uuids.root,
'name': 'root',
'generation': 0,
},
]
pt.populate_from_iterable(plist)
expected_uuids = [uuids.root]
self.assertEqual(expected_uuids, pt.get_provider_uuids())
# Let's add a child while updating the root's name and generation.
# root
# +-> child1
plist = [
{
'uuid': uuids.root,
'name': 'root_with_new_name',
'generation': 1,
},
{
'uuid': uuids.child1,
'name': 'child1',
'generation': 1,
'parent_provider_uuid': uuids.root,
},
]
pt.populate_from_iterable(plist)
expected_uuids = [uuids.root, uuids.child1]
self.assertEqual(expected_uuids, pt.get_provider_uuids())
def test_populate_from_iterable_disown_grandchild(self):
# Start with:
# root
# +-> child
# | +-> grandchild
# Then send in [child] and grandchild should disappear.
child = {
'uuid': uuids.child,
'name': 'child',
'generation': 1,
'parent_provider_uuid': uuids.root,
}
pt = provider_tree.ProviderTree()
plist = [
{
'uuid': uuids.root,
'name': 'root',
'generation': 0,
},
child,
{
'uuid': uuids.grandchild,
'name': 'grandchild',
'generation': 2,
'parent_provider_uuid': uuids.child,
},
]
pt.populate_from_iterable(plist)
self.assertEqual([uuids.root, uuids.child, uuids.grandchild],
pt.get_provider_uuids())
self.assertTrue(pt.exists(uuids.grandchild))
pt.populate_from_iterable([child])
self.assertEqual([uuids.root, uuids.child], pt.get_provider_uuids())
self.assertFalse(pt.exists(uuids.grandchild))
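
(Editorial aside, not part of the change.) This pruning is what lets a cached
tree be made to mirror placement exactly: re-sending a provider without some
of its previously cached descendants drops those descendants. A sketch, where
get_providers_in_tree is a hypothetical query returning dicts shaped like
plist above:

    def refresh_subtree(tree, root_uuid, get_providers_in_tree):
        # Anything no longer reported under this root is disowned from the
        # cache as a side effect of populate_from_iterable().
        tree.populate_from_iterable(get_providers_in_tree(root_uuid))
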
def test_has_inventory_changed_no_existing_rp(self):
pt = self._pt_with_cns()
self.assertRaises(
ValueError,
pt.has_inventory_changed,
uuids.non_existing_rp,
{}
)
def test_update_inventory_no_existing_rp(self):
pt = self._pt_with_cns()
self.assertRaises(
ValueError,
pt.update_inventory,
uuids.non_existing_rp,
{},
)
def test_has_inventory_changed(self):
cn = self.compute_node1
pt = self._pt_with_cns()
rp_gen = 1
cn_inv = {
'VCPU': {
'total': 8,
'min_unit': 1,
'max_unit': 8,
'step_size': 1,
'allocation_ratio': 16.0,
},
'MEMORY_MB': {
'total': 1024,
'reserved': 512,
'min_unit': 64,
'max_unit': 1024,
'step_size': 64,
'allocation_ratio': 1.5,
},
'DISK_GB': {
'total': 1000,
'reserved': 100,
'min_unit': 10,
'max_unit': 1000,
'step_size': 10,
'allocation_ratio': 1.0,
},
}
self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
self.assertTrue(pt.update_inventory(cn.uuid, cn_inv,
generation=rp_gen))
# Updating with the same inventory info should return False
self.assertFalse(pt.has_inventory_changed(cn.uuid, cn_inv))
self.assertFalse(pt.update_inventory(cn.uuid, cn_inv,
generation=rp_gen))
# A snapshot grabbed via data() should compare "equal" to the original
cndata = pt.data(cn.uuid)
self.assertFalse(pt.has_inventory_changed(cn.uuid, cndata.inventory))
cn_inv['VCPU']['total'] = 6
self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
self.assertTrue(pt.update_inventory(cn.uuid, cn_inv,
generation=rp_gen))
# The data() result was not affected; now the tree's copy is different
self.assertTrue(pt.has_inventory_changed(cn.uuid, cndata.inventory))
self.assertFalse(pt.has_inventory_changed(cn.uuid, cn_inv))
self.assertFalse(pt.update_inventory(cn.uuid, cn_inv,
generation=rp_gen))
# Deleting a key in the new record should NOT result in changes being
# recorded...
del cn_inv['VCPU']['allocation_ratio']
self.assertFalse(pt.has_inventory_changed(cn.uuid, cn_inv))
self.assertFalse(pt.update_inventory(cn.uuid, cn_inv,
generation=rp_gen))
del cn_inv['MEMORY_MB']
self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
self.assertTrue(pt.update_inventory(cn.uuid, cn_inv,
generation=rp_gen))
# ...but *adding* a key in the new record *should* result in changes
# being recorded
cn_inv['VCPU']['reserved'] = 0
self.assertTrue(pt.has_inventory_changed(cn.uuid, cn_inv))
self.assertTrue(pt.update_inventory(cn.uuid, cn_inv,
generation=rp_gen))
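
(Editorial aside, not part of the change.) The pattern these assertions pin
down - report only when the cache says something changed, then record the
generation placement hands back - is sketched below; put_inventory is a
hypothetical placement-API helper:

    def sync_inventory(tree, rp_uuid, inv, put_inventory):
        # Skip the round trip when the cached inventory already matches;
        # this is the False/False branch exercised above.
        if not tree.has_inventory_changed(rp_uuid, inv):
            return False
        new_gen = put_inventory(rp_uuid, inv)  # e.g. a PUT to placement
        # Cache the new inventory along with the generation it was given.
        tree.update_inventory(rp_uuid, inv, generation=new_gen)
        return True
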
def test_have_traits_changed_no_existing_rp(self):
pt = self._pt_with_cns()
self.assertRaises(
ValueError, pt.have_traits_changed, uuids.non_existing_rp, [])
def test_update_traits_no_existing_rp(self):
pt = self._pt_with_cns()
self.assertRaises(
ValueError, pt.update_traits, uuids.non_existing_rp, [])
def test_have_traits_changed(self):
cn = self.compute_node1
pt = self._pt_with_cns()
rp_gen = 1
traits = [
"HW_GPU_API_DIRECT3D_V7_0",
"HW_NIC_OFFLOAD_SG",
"HW_CPU_X86_AVX",
]
self.assertTrue(pt.have_traits_changed(cn.uuid, traits))
# A snapshot grabbed via data() carries the same traits
cnsnap = pt.data(cn.uuid)
self.assertFalse(pt.have_traits_changed(cn.uuid, cnsnap.traits))
self.assertTrue(pt.has_traits(cn.uuid, []))
self.assertFalse(pt.has_traits(cn.uuid, traits))
self.assertFalse(pt.has_traits(cn.uuid, traits[:1]))
self.assertTrue(pt.update_traits(cn.uuid, traits, generation=rp_gen))
self.assertTrue(pt.has_traits(cn.uuid, traits))
self.assertTrue(pt.has_traits(cn.uuid, traits[:1]))
# Updating with the same traits info should return False
self.assertFalse(pt.have_traits_changed(cn.uuid, traits))
# But the generation should get updated
rp_gen = 2
self.assertFalse(pt.update_traits(cn.uuid, traits, generation=rp_gen))
self.assertFalse(pt.have_traits_changed(cn.uuid, traits))
self.assertEqual(rp_gen, pt.data(cn.uuid).generation)
self.assertTrue(pt.has_traits(cn.uuid, traits))
self.assertTrue(pt.has_traits(cn.uuid, traits[:1]))
# Make a change to the traits list
traits.append("HW_GPU_RESOLUTION_W800H600")
self.assertTrue(pt.have_traits_changed(cn.uuid, traits))
# The previously-taken data now differs
self.assertTrue(pt.have_traits_changed(cn.uuid, cnsnap.traits))
self.assertFalse(pt.has_traits(cn.uuid, traits[-1:]))
# Don't update the generation
self.assertTrue(pt.update_traits(cn.uuid, traits))
self.assertEqual(rp_gen, pt.data(cn.uuid).generation)
self.assertTrue(pt.has_traits(cn.uuid, traits[-1:]))
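
(Editorial aside, not part of the change.) As the has_traits() assertions
show, the check is a subset test: vacuously true for an empty list, and true
only when every named trait is present. A small sketch of a filter built on
that guarantee; required_traits is assumed to come from a request:

    def providers_satisfying(tree, required_traits):
        # Yield every cached provider carrying all the required traits; an
        # empty requirement list matches everything, as asserted above.
        for rp_uuid in tree.get_provider_uuids():
            if tree.has_traits(rp_uuid, required_traits):
                yield rp_uuid
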
def test_add_remove_traits(self):
cn = self.compute_node1
pt = self._pt_with_cns()
self.assertEqual(set([]), pt.data(cn.uuid).traits)
# Adding zero traits for a bogus provider should be a silent no-op
pt.add_traits('bogus-uuid')
self.assertEqual(
set([]),
pt.data(cn.uuid).traits
)
# Add a couple of traits
pt.add_traits(cn.uuid, "HW_GPU_API_DIRECT3D_V7_0", "HW_NIC_OFFLOAD_SG")
self.assertEqual(
set(["HW_GPU_API_DIRECT3D_V7_0", "HW_NIC_OFFLOAD_SG"]),
pt.data(cn.uuid).traits)
# set() behavior: add a trait that's already there, and one that's not.
# The unrelated one is unaffected.
pt.add_traits(cn.uuid, "HW_GPU_API_DIRECT3D_V7_0", "HW_CPU_X86_AVX")
self.assertEqual(
set(["HW_GPU_API_DIRECT3D_V7_0", "HW_NIC_OFFLOAD_SG",
"HW_CPU_X86_AVX"]),
pt.data(cn.uuid).traits)
# Removing zero traits for a bogus provider should likewise be a no-op
pt.remove_traits('bogus-uuid')
self.assertEqual(
set(["HW_GPU_API_DIRECT3D_V7_0", "HW_NIC_OFFLOAD_SG",
"HW_CPU_X86_AVX"]),
pt.data(cn.uuid).traits)
# Now remove a trait
pt.remove_traits(cn.uuid, "HW_NIC_OFFLOAD_SG")
self.assertEqual(
set(["HW_GPU_API_DIRECT3D_V7_0", "HW_CPU_X86_AVX"]),
pt.data(cn.uuid).traits)
# set() behavior: remove a trait that's there, and one that's not.
# The unrelated one is unaffected.
pt.remove_traits(cn.uuid,
"HW_NIC_OFFLOAD_SG", "HW_GPU_API_DIRECT3D_V7_0")
self.assertEqual(set(["HW_CPU_X86_AVX"]), pt.data(cn.uuid).traits)
# Remove the last trait, and an unrelated one
pt.remove_traits(cn.uuid, "CUSTOM_FOO", "HW_CPU_X86_AVX")
self.assertEqual(set([]), pt.data(cn.uuid).traits)
def test_have_aggregates_changed_no_existing_rp(self):
pt = self._pt_with_cns()
self.assertRaises(
ValueError, pt.have_aggregates_changed, uuids.non_existing_rp, [])
def test_update_aggregates_no_existing_rp(self):
pt = self._pt_with_cns()
self.assertRaises(
ValueError, pt.update_aggregates, uuids.non_existing_rp, [])
def test_have_aggregates_changed(self):
cn = self.compute_node1
pt = self._pt_with_cns()
rp_gen = 1
aggregates = [
uuids.agg1,
uuids.agg2,
]
self.assertTrue(pt.have_aggregates_changed(cn.uuid, aggregates))
self.assertTrue(pt.in_aggregates(cn.uuid, []))
self.assertFalse(pt.in_aggregates(cn.uuid, aggregates))
self.assertFalse(pt.in_aggregates(cn.uuid, aggregates[:1]))
self.assertTrue(pt.update_aggregates(cn.uuid, aggregates,
generation=rp_gen))
self.assertTrue(pt.in_aggregates(cn.uuid, aggregates))
self.assertTrue(pt.in_aggregates(cn.uuid, aggregates[:1]))
# data() gets the same aggregates
cnsnap = pt.data(cn.uuid)
self.assertFalse(
pt.have_aggregates_changed(cn.uuid, cnsnap.aggregates))
# Updating with the same aggregates info should return False
self.assertFalse(pt.have_aggregates_changed(cn.uuid, aggregates))
# But the generation should get updated
rp_gen = 2
self.assertFalse(pt.update_aggregates(cn.uuid, aggregates,
generation=rp_gen))
self.assertFalse(pt.have_aggregates_changed(cn.uuid, aggregates))
self.assertEqual(rp_gen, pt.data(cn.uuid).generation)
self.assertTrue(pt.in_aggregates(cn.uuid, aggregates))
self.assertTrue(pt.in_aggregates(cn.uuid, aggregates[:1]))
# Make a change to the aggregates list
aggregates.append(uuids.agg3)
self.assertTrue(pt.have_aggregates_changed(cn.uuid, aggregates))
self.assertFalse(pt.in_aggregates(cn.uuid, aggregates[-1:]))
# Don't update the generation
self.assertTrue(pt.update_aggregates(cn.uuid, aggregates))
self.assertEqual(rp_gen, pt.data(cn.uuid).generation)
self.assertTrue(pt.in_aggregates(cn.uuid, aggregates[-1:]))
# Previously-taken data now differs
self.assertTrue(pt.have_aggregates_changed(cn.uuid, cnsnap.aggregates))
def test_add_remove_aggregates(self):
cn = self.compute_node1
pt = self._pt_with_cns()
self.assertEqual(set([]), pt.data(cn.uuid).aggregates)
# Add a couple of aggregates
pt.add_aggregates(cn.uuid, uuids.agg1, uuids.agg2)
self.assertEqual(
set([uuids.agg1, uuids.agg2]),
pt.data(cn.uuid).aggregates)
# set() behavior: add an aggregate that's already there, and one that's
# not. The unrelated one is unaffected.
pt.add_aggregates(cn.uuid, uuids.agg1, uuids.agg3)
self.assertEqual(set([uuids.agg1, uuids.agg2, uuids.agg3]),
pt.data(cn.uuid).aggregates)
# Now remove an aggregate
pt.remove_aggregates(cn.uuid, uuids.agg2)
self.assertEqual(set([uuids.agg1, uuids.agg3]),
pt.data(cn.uuid).aggregates)
# set() behavior: remove an aggregate that's there, and one that's not.
# The unrelated one is unaffected.
pt.remove_aggregates(cn.uuid, uuids.agg2, uuids.agg3)
self.assertEqual(set([uuids.agg1]), pt.data(cn.uuid).aggregates)
# Remove the last aggregate, and an unrelated one
pt.remove_aggregates(cn.uuid, uuids.agg4, uuids.agg1)
self.assertEqual(set([]), pt.data(cn.uuid).aggregates)
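
(Editorial aside, not part of the change.) Because the add_*/remove_* helpers
are idempotent set operations, membership changes can be applied blindly,
with no read-modify-write step. A self-contained sketch, reusing this test
module's provider_tree import and the oslo.utils uuidsentinel fixture as
uuids; new_root() is assumed available, as in the nova original:

    pt = provider_tree.ProviderTree()
    pt.new_root('cn', uuids.cn, generation=0)
    pt.add_aggregates(uuids.cn, uuids.agg1)
    pt.add_aggregates(uuids.cn, uuids.agg1)     # re-adding is a no-op
    pt.remove_aggregates(uuids.cn, uuids.agg2)  # absent, silently ignored
    assert pt.data(uuids.cn).aggregates == set([uuids.agg1])
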

View File

@ -0,0 +1,39 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fakes relating to the `requests` module."""
import requests
class FakeResponse(requests.Response):
def __init__(self, status_code, content=None, headers=None):
"""A requests.Response that can be used as a mock return_value.
A key feature is that the instance will evaluate to True or False like
a real Response, based on the status_code.
Properties like ok, status_code, text, and content, and methods like
json(), work as expected based on the inputs.
:param status_code: Integer HTTP response code (200, 404, etc.)
:param content: String supplying the payload content of the response.
Using a json-encoded string will make the json() method
behave as expected.
:param headers: Dict of HTTP header values to set.
"""
super(FakeResponse, self).__init__()
self.status_code = status_code
if content:
self._content = content.encode('utf-8')
self.encoding = 'utf-8'
if headers:
self.headers = headers
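
(Editorial aside, not part of the change.) A brief usage sketch: because
FakeResponse is a real requests.Response underneath, it can stand in for a
mocked session's return value, and a json-encoded content string makes
json() work as the docstring promises:

    import json

    import mock

    resp = FakeResponse(200, content=json.dumps({'resource_providers': []}))
    session = mock.Mock()
    session.get.return_value = resp

    out = session.get('/resource_providers')
    assert out.ok and bool(out)  # truthy, like a live 200 response
    assert out.json() == {'resource_providers': []}
    assert not FakeResponse(404)  # falsy, like a live 404 response
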

View File

@ -371,6 +371,7 @@ object_data = {
'Network': '1.1-26e8d37a54e5fc905ede657744a221d9',
'ExecInstance': '1.0-59464e7b96db847c0abb1e96d3cec30a',
'Registry': '1.0-36c2053fbc30e0021630e657dd1699c9',
'RequestGroup': '1.0-5e08d68d0a63b729778340d608ec4eae',
}

View File

@ -13,6 +13,7 @@
from zun import objects
from zun.objects.numa import NUMATopology
from zun.tests.unit.db import utils as db_utils
@ -62,3 +63,20 @@ def get_test_registry(context, **kwargs):
    for key in db_registry:
        setattr(registry, key, db_registry[key])
    return registry


def get_test_compute_node(context, **kwargs):
    """Return a test compute node object with appropriate attributes.

    NOTE: The object leaves the attributes marked as changed, such
    that a create() could be used to commit it to the DB.
    """
    db_compute_node = db_utils.get_test_compute_node(**kwargs)
    compute_node = objects.ComputeNode(context)
    for key in db_compute_node:
        if key == 'numa_topology':
            numa_obj = NUMATopology._from_dict(db_compute_node[key])
            compute_node.numa_topology = numa_obj
        else:
            setattr(compute_node, key, db_compute_node[key])
    return compute_node
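
(Editorial aside, not part of the change.) Per the NOTE above, every field on
the returned object is still marked dirty, so a test can persist it in one
call. A minimal usage sketch; the hostname override is illustrative, and
create() is assumed to take the context as other zun objects do:

    node = get_test_compute_node(context, hostname='fake-host')
    node.create(context)  # writes the full record; all fields were dirty
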

View File

@ -14,7 +14,7 @@ import mock
from oslo_config import cfg
from zun.scheduler import client as scheduler_client
from zun.scheduler.client import query as scheduler_client
from zun.scheduler import filter_scheduler
from zun.tests import base
from zun.tests.unit.scheduler import fakes

File diff suppressed because it is too large