Add support for Capsule volume
Capsules can now have volumes attached at creation time, and the capsule
template format for volumes has been updated accordingly. A user can either
create a new volume when launching the capsule by specifying a volume size,
or use an existing available volume by setting its volumeID in the template.
Attaching one volume to several containers is not supported yet; the
supported cases are:
1. one volume attached to one container
2. several volumes attached to one container

Part of blueprint introduce-compose

Change-Id: I2e2afba94cb83e6e7924366cbd8135fe6d6eea16
Signed-off-by: Kevin Zhao <kevin.zhao@arm.com>
parent 010753f0e2
commit 1325606d45

template/capsule/capsule-volume.yaml (new file, 35 lines)
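As a reading aid before the diff (not part of the commit itself), here is a minimal sketch of the two cinder volume forms the new template format accepts, fed through the capsule_get_volume_spec() helper this commit adds to zun.common.utils; the dict literals are illustrative values only.

```python
# Illustrative only: a capsule 'spec' section with the two supported
# cinder volume forms. A volume entry gives either a size (a new cinder
# volume is created) or a volumeID (an existing volume is reused), never both.
from zun.common import utils

spec_field = {
    'volumes': [
        # create a new 3 GB cinder volume, flagged for automatic removal
        {'name': 'volume1', 'cinder': {'size': 3, 'autoRemove': True}},
        # reuse an existing cinder volume by its UUID
        {'name': 'volume2',
         'cinder': {'volumeID': '473e4a6a-99f2-4b42-88ce-5ab03a00b756'}},
    ]
}

# Raises InvalidCapsuleTemplate if a name is missing, if both size and
# volumeID (or neither) are given, or if a non-cinder driver is used.
volumes_spec = utils.capsule_get_volume_spec(spec_field)
```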
@@ -0,0 +1,35 @@
capsule_template_version: 2017-12-20
# use "-" because that the fields have many items
capsule_version: beta
kind: capsule
metadata:
name: capsule-volume
labels:
foo: bar
restart_policy: always
spec:
containers:
- image: test
command:
- "/bin/bash"
workdir: /root
labels:
app: web
volumeMounts:
- name: volume1
mountPath: /data1
- name: volume2
mountPath: /data2
- name: volume3
mountPath: /data3
volumes:
- name: volume1
cinder:
size: 3
autoRemove: True
- name: volume2
cinder:
volumeID: 473e4a6a-99f2-4b42-88ce-5ab03a00b756
- name: volume3
cinder:
volumeID: f4246aa1-1c87-479c-a2ab-4dbaf0c3c7bb
@@ -28,6 +28,10 @@ spec:
memory: 1024
environment:
PATCH: /usr/local/bin
volumeMounts:
- name: volume1
mountPath: /data1
readOnly: True
- image: centos
command:
- "echo"
@@ -53,10 +57,19 @@ spec:
memory: 1024
environment:
NWH: /usr/bin/
volumeMounts:
- name: volume2
mountPath: /data2
- name: volume3
mountPath: /data3
volumes:
- name: volume1
drivers: cinder
driverOptions: options
size: 5GB
volumeType: type1
image: ubuntu-xenial
cinder:
size: 5
autoRemove: True
- name: volume2
cinder:
volumeID: 473e4a6a-99f2-4b42-88ce-5ab03a00b756
- name: volume3
cinder:
volumeID: f4246aa1-1c87-479c-a2ab-4dbaf0c3c7bb
@@ -14,6 +14,7 @@

from oslo_log import log as logging
import pecan
import six

from zun.api.controllers import base
from zun.api.controllers.experimental import collection
@@ -23,11 +24,13 @@ from zun.api.controllers import link
from zun.api import utils as api_utils
from zun.common import consts
from zun.common import exception
from zun.common.i18n import _
from zun.common import name_generator
from zun.common import policy
from zun.common import utils
from zun.common import validation
from zun import objects
from zun.volume import cinder_api as cinder

LOG = logging.getLogger(__name__)

@@ -126,8 +129,14 @@ class CapsuleController(base.Controller):
compute_api = pecan.request.compute_api
policy.enforce(context, "capsule:create",
action="capsule:create")

# Abstract the capsule specification
capsules_spec = capsule_dict['spec']
containers_spec = utils.check_capsule_template(capsules_spec)
spec_content = utils.check_capsule_template(capsules_spec)
containers_spec = utils.capsule_get_container_spec(spec_content)
volumes_spec = utils.capsule_get_volume_spec(spec_content)

# Create the capsule Object
new_capsule = objects.Capsule(context, **capsule_dict)
new_capsule.project_id = context.project_id
new_capsule.user_id = context.user_id
@@ -137,12 +146,15 @@ class CapsuleController(base.Controller):
new_capsule.volumes = []
capsule_need_cpu = 0
capsule_need_memory = 0
count = len(containers_spec)
container_volume_requests = []

capsule_restart_policy = capsules_spec.get('restart_policy', 'always')

metadata_info = capsules_spec.get('metadata', None)
requested_networks = capsules_spec.get('nets', [])
requested_networks_info = capsules_spec.get('nets', [])
requested_networks = \
utils.build_requested_networks(context, requested_networks_info)

if metadata_info:
new_capsule.meta_name = metadata_info.get('name', None)
new_capsule.meta_labels = metadata_info.get('labels', None)
@@ -157,7 +169,8 @@ class CapsuleController(base.Controller):
new_capsule.containers.append(sandbox_container)
new_capsule.containers_uuids.append(sandbox_container.uuid)

for k in range(count):
container_num = len(containers_spec)
for k in range(container_num):
container_dict = containers_spec[k]
container_dict['project_id'] = context.project_id
container_dict['user_id'] = context.user_id
@@ -181,7 +194,7 @@ class CapsuleController(base.Controller):
container_dict['command'] = container_dict['args']
container_dict.pop('args')

# NOTE(kevinz): Don't support pod remapping, will find a
# NOTE(kevinz): Don't support port remapping, will find a
# easy way to implement it.
# if container need to open some port, just open it in container,
# user can change the security group and getting access to port.
@@ -205,6 +218,11 @@ class CapsuleController(base.Controller):
"Name": capsule_restart_policy}
utils.check_for_restart_policy(container_dict)

if container_dict.get('volumeMounts'):
for volume in container_dict['volumeMounts']:
volume['container_name'] = name
container_volume_requests.append(volume)

container_dict['status'] = consts.CREATING
container_dict['interactive'] = True
new_container = objects.Container(context, **container_dict)
@@ -212,10 +230,15 @@ class CapsuleController(base.Controller):
new_capsule.containers.append(new_container)
new_capsule.containers_uuids.append(new_container.uuid)

# Deal with the volume support
requested_volumes = \
self._build_requested_volumes(context, volumes_spec,
container_volume_requests)
new_capsule.cpu = capsule_need_cpu
new_capsule.memory = str(capsule_need_memory) + 'M'
new_capsule.save(context)
compute_api.capsule_create(context, new_capsule, requested_networks)
compute_api.capsule_create(context, new_capsule, requested_networks,
requested_volumes)
# Set the HTTP Location Header
pecan.response.location = link.build_url('capsules',
new_capsule.uuid)
@@ -302,3 +325,64 @@ class CapsuleController(base.Controller):
dict = container_dict[field][k]
container_dict[field] = dict
return container_dict

def _build_requested_volumes(self, context, volume_spec, volume_mounts):
# NOTE(hongbin): We assume cinder is the only volume provider here.
# The logic needs to be re-visited if a second volume provider
# (i.e. Manila) is introduced.
# NOTE(kevinz): We assume the volume_mounts has been pretreated,
# there won't occur that volume multiple attach and no untapped
# volume.
cinder_api = cinder.CinderAPI(context)
volume_driver = "cinder"
requested_volumes = []
volume_created = []
try:
for mount in volume_spec:
mount_driver = mount[volume_driver]
auto_remove = False
if mount_driver.get("volumeID"):
uuid = mount_driver.get("volumeID")
volume = cinder_api.search_volume(uuid)
cinder_api.ensure_volume_usable(volume)
else:
size = mount_driver.get("size")
volume = cinder_api.create_volume(size)
volume_created.append(volume)
if "autoRemove" in mount_driver.keys() \
and mount_driver.get("autoRemove", False):
auto_remove = True

mount_destination = None
container_name = None

for item in volume_mounts:
if item['name'] == mount['name']:
mount_destination = item['mountPath']
container_name = item['container_name']
break

if mount_destination and container_name:
volmapp = objects.VolumeMapping(
context,
volume_id=volume.id, volume_provider=volume_driver,
container_path=mount_destination,
user_id=context.user_id,
project_id=context.project_id,
auto_remove=auto_remove)
requested_volumes.append({container_name: volmapp})
else:
msg = _("volume mount parameters is invalid.")
raise exception.Invalid(msg)
except Exception as e:
# if volume search or created failed, will remove all
# the created volume. The existed volume will remain.
for volume in volume_created:
try:
cinder_api.delete_volume(volume.id)
except Exception as exc:
LOG.error('Error on deleting volume "%s": %s.',
volume.id, six.text_type(exc))
raise e

return requested_volumes
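As a reading aid (not part of the diff): a rough sketch of the structure _build_requested_volumes() returns, assuming a container named 'web' that mounts one existing cinder volume at /data1; the names and values are illustrative only.

```python
# Illustrative only: one single-key dict per volume mount, keyed by the
# container name that requested it; each value is a VolumeMapping carrying
# the cinder volume id, the in-container path and the auto-remove flag.
requested_volumes = [
    {'web': objects.VolumeMapping(
        context,
        volume_id='473e4a6a-99f2-4b42-88ce-5ab03a00b756',
        volume_provider='cinder',
        container_path='/data1',
        user_id=context.user_id,
        project_id=context.project_id,
        auto_remove=False)},
]
```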
@@ -238,7 +238,7 @@ class ContainersController(base.Controller):
min_version=min_version)

nets = container_dict.get('nets', [])
requested_networks = self._build_requested_networks(context, nets)
requested_networks = utils.build_requested_networks(context, nets)
pci_req = self._create_pci_requests_for_sriov_ports(context,
requested_networks)

@@ -365,57 +365,6 @@ class ContainersController(base.Controller):
phynet_name = net.get('provider:physical_network')
return phynet_name

def _check_external_network_attach(self, context, nets):
"""Check if attaching to external network is permitted."""
if not context.can(NETWORK_ATTACH_EXTERNAL,
fatal=False):
for net in nets:
if net.get('router:external') and not net.get('shared'):
raise exception.ExternalNetworkAttachForbidden(
network_uuid=net['network'])

def _build_requested_networks(self, context, nets):
neutron_api = neutron.NeutronAPI(context)
requested_networks = []
for net in nets:
if net.get('port'):
port = neutron_api.get_neutron_port(net['port'])
neutron_api.ensure_neutron_port_usable(port)
network = neutron_api.get_neutron_network(port['network_id'])
requested_networks.append({'network': port['network_id'],
'port': port['id'],
'router:external':
network.get('router:external'),
'shared': network.get('shared'),
'v4-fixed-ip': '',
'v6-fixed-ip': '',
'preserve_on_delete': True})
elif net.get('network'):
network = neutron_api.get_neutron_network(net['network'])
requested_networks.append({'network': network['id'],
'port': '',
'router:external':
network.get('router:external'),
'shared': network.get('shared'),
'v4-fixed-ip':
net.get('v4-fixed-ip', ''),
'v6-fixed-ip':
net.get('v6-fixed-ip', ''),
'preserve_on_delete': False})

if not requested_networks:
# Find an available neutron net and create docker network by
# wrapping the neutron net.
neutron_net = neutron_api.get_available_network()
requested_networks.append({'network': neutron_net['id'],
'port': '',
'v4-fixed-ip': '',
'v6-fixed-ip': '',
'preserve_on_delete': False})

self._check_external_network_attach(context, requested_networks)
return requested_networks

def _build_requested_volumes(self, context, mounts):
# NOTE(hongbin): We assume cinder is the only volume provider here.
# The logic needs to be re-visited if a second volume provider
@@ -37,9 +37,11 @@ from zun.common import exception
from zun.common.i18n import _
from zun.common import privileged
import zun.conf
from zun.network import neutron

CONF = zun.conf.CONF
LOG = logging.getLogger(__name__)
NETWORK_ATTACH_EXTERNAL = 'network:attach_external_network'

synchronized = lockutils.synchronized_with_prefix('zun-')

@@ -338,29 +340,63 @@ def execute(*cmd, **kwargs):

def check_capsule_template(tpl):
# TODO(kevinz): add volume spec check
kind_field = tpl.get('kind', None)
kind_field = tpl.get('kind')
if kind_field not in ['capsule', 'Capsule']:
raise exception.InvalidCapsuleTemplate("kind fields need to be "
"set as capsule or Capsule")
spec_field = tpl.get('spec', None)
spec_field = tpl.get('spec')
if spec_field is None:
raise exception.InvalidCapsuleTemplate("No Spec found")
if spec_field.get('containers', None) is None:
if spec_field.get('containers') is None:
raise exception.InvalidCapsuleTemplate("No valid containers field")
containers_spec = spec_field.get('containers', None)
return spec_field


def capsule_get_container_spec(spec_field):
containers_spec = spec_field.get('containers')
containers_num = len(containers_spec)
if containers_num == 0:
raise exception.InvalidCapsuleTemplate("Capsule need to have one "
"container at least")

for i in range(0, containers_num):
container_image = containers_spec[i].get('image', None)
container_image = containers_spec[i].get('image')
if container_image is None:
raise exception.InvalidCapsuleTemplate("Container "
"image is needed")
return containers_spec


def capsule_get_volume_spec(spec_field):
volumes_spec = spec_field.get('volumes')
if not volumes_spec:
return []
volumes_num = len(volumes_spec)

for i in range(volumes_num):
volume_name = volumes_spec[i].get('name')
if volume_name is None:
raise exception.InvalidCapsuleTemplate("Volume name "
"is needed")
if volumes_spec[i].get('cinder'):
cinder_spec = volumes_spec[i].get('cinder')
volume_uuid = cinder_spec.get('volumeID')
volume_size = cinder_spec.get('size')
if not volume_uuid:
if volume_size is None:
raise exception.InvalidCapsuleTemplate("Volume size "
"is needed")
elif volume_uuid and volume_size:
raise exception.InvalidCapsuleTemplate("Volume size and uuid "
"could not be set at "
"the same time")
else:
raise exception.InvalidCapsuleTemplate("Zun now Only support "
"Cinder volume driver")

return volumes_spec


def is_all_projects(search_opts):
all_projects = search_opts.get('all_projects') or \
search_opts.get('all_tenants')
@@ -411,3 +447,61 @@ def check_for_restart_policy(container_dict):
raise exception.InvalidValue(msg)
elif name in ['no']:
container_dict.get('restart_policy')['MaximumRetryCount'] = '0'


def build_requested_networks(context, nets):
"""Build requested networks by calling neutron client

:param nets: The special network uuid when create container
if none, will call neutron to create new network.
:returns: available network and ports
"""
neutron_api = neutron.NeutronAPI(context)
requested_networks = []
for net in nets:
if net.get('port'):
port = neutron_api.get_neutron_port(net['port'])
neutron_api.ensure_neutron_port_usable(port)
network = neutron_api.get_neutron_network(port['network_id'])
requested_networks.append({'network': port['network_id'],
'port': port['id'],
'router:external':
network.get('router:external'),
'shared': network.get('shared'),
'v4-fixed-ip': '',
'v6-fixed-ip': '',
'preserve_on_delete': True})
elif net.get('network'):
network = neutron_api.get_neutron_network(net['network'])
requested_networks.append({'network': network['id'],
'port': '',
'router:external':
network.get('router:external'),
'shared': network.get('shared'),
'v4-fixed-ip':
net.get('v4-fixed-ip', ''),
'v6-fixed-ip':
net.get('v6-fixed-ip', ''),
'preserve_on_delete': False})
if not requested_networks:
# Find an available neutron net and create docker network by
# wrapping the neutron net.
neutron_net = neutron_api.get_available_network()
requested_networks.append({'network': neutron_net['id'],
'port': '',
'v4-fixed-ip': '',
'v6-fixed-ip': '',
'preserve_on_delete': False})

check_external_network_attach(context, requested_networks)
return requested_networks


def check_external_network_attach(context, nets):
"""Check if attaching to external network is permitted."""
if not context.can(NETWORK_ATTACH_EXTERNAL,
fatal=False):
for net in nets:
if net.get('router:external') and not net.get('shared'):
raise exception.ExternalNetworkAttachForbidden(
network_uuid=net['network'])
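As a reading aid (not part of the diff): a sketch of how the network helper moved into zun.common.utils is meant to be called by both the containers and capsules controllers; the UUID placeholders are illustrative, not real values.

```python
# Illustrative only: resolve user-supplied nets into concrete neutron
# network/port entries. An empty list makes the helper pick an available
# neutron network itself.
nets = [{'network': '<neutron-network-uuid>'}]   # or [{'port': '<port-uuid>'}]
requested_networks = utils.build_requested_networks(context, nets)
# Each entry contains 'network', 'port', 'v4-fixed-ip', 'v6-fixed-ip' and
# 'preserve_on_delete'; attaching to an external network is rejected on the
# way out via check_external_network_attach().
```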
@@ -135,8 +135,8 @@ class API(object):
return self.rpcapi.image_search(context, image, image_driver,
exact_match, *args)

def capsule_create(self, context, new_capsule,
requested_networks=None, extra_spec=None):
def capsule_create(self, context, new_capsule, requested_networks=None,
requested_volumes=None, extra_spec=None):
host_state = None
try:
host_state = self._schedule_container(context, new_capsule,
@@ -147,7 +147,8 @@ class API(object):
new_capsule.save(context)
return
self.rpcapi.capsule_create(context, host_state['host'], new_capsule,
requested_networks, host_state['limits'])
requested_networks, requested_volumes,
host_state['limits'])

def capsule_delete(self, context, capsule, *args):
return self.rpcapi.capsule_delete(context, capsule, *args)
@@ -235,7 +235,6 @@ class Manager(periodic_task.PeriodicTasks):
if self.use_sandbox:
sandbox = self._create_sandbox(context, container,
requested_networks,
requested_volumes,
reraise)
if sandbox is None:
return
@@ -315,7 +314,7 @@ class Manager(periodic_task.PeriodicTasks):
'driver': self.driver})

def _create_sandbox(self, context, container, requested_networks,
requested_volumes, reraise=False):
reraise=False):
self._update_task_state(context, container, consts.SANDBOX_CREATING)
sandbox_image = CONF.sandbox_image
sandbox_image_driver = CONF.sandbox_image_driver
@@ -330,7 +329,7 @@ class Manager(periodic_task.PeriodicTasks):
sandbox_id = self.driver.create_sandbox(
context, container, image=sandbox_image,
requested_networks=requested_networks,
requested_volumes=requested_volumes)
requested_volumes=[])
return sandbox_id
except Exception as e:
with excutils.save_and_reraise_exception(reraise=reraise):
@@ -866,16 +865,29 @@ class Manager(periodic_task.PeriodicTasks):
except Exception:
return

def capsule_create(self, context, capsule, requested_networks, limits):
def capsule_create(self, context, capsule, requested_networks,
requested_volumes, limits):
@utils.synchronized("capsule-" + capsule.uuid)
def do_capsule_create():
self._do_capsule_create(context, capsule, requested_networks,
limits)
requested_volumes, limits)

utils.spawn_n(do_capsule_create)

def _do_capsule_create(self, context, capsule, requested_networks=None,
def _do_capsule_create(self, context, capsule,
requested_networks=None,
requested_volumes=None,
limits=None, reraise=False):
"""Create capsule in the compute node

:param context: security context
:param capsule: the special capsule object
:param requested_networks: the network ports that capsule will
connect
:param requested_volumes: the volume that capsule need
:param limits: no use field now.
:param reraise: flag of reraise the error, default is Falses
"""
capsule.containers[0].image = CONF.sandbox_image
capsule.containers[0].image_driver = CONF.sandbox_image_driver
capsule.containers[0].image_pull_policy = \
@@ -883,20 +895,33 @@ class Manager(periodic_task.PeriodicTasks):
capsule.containers[0].save(context)
sandbox = self._create_sandbox(context,
capsule.containers[0],
requested_networks, reraise)
requested_networks,
reraise)
capsule.containers[0].task_state = None
capsule.containers[0].status = consts.RUNNING
sandbox_id = capsule.containers[0].get_sandbox_id()
capsule.containers[0].container_id = sandbox_id
capsule.containers[0].save(context)
count = len(capsule.containers)

for k in range(1, count):
container_requested_volumes = []
capsule.containers[k].set_sandbox_id(sandbox_id)
capsule.containers[k].addresses = capsule.containers[0].addresses
container_name = capsule.containers[k].name
for volume in requested_volumes:
if volume.get(container_name, None):
container_requested_volumes.append(
volume.get(container_name))
if not self._attach_volumes(context, capsule.containers[k],
container_requested_volumes):
return
# Add volume assignment
created_container = \
self._do_container_create_base(context,
capsule.containers[k],
requested_networks,
container_requested_volumes,
sandbox=sandbox,
limits=limits)
if created_container:
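As a reading aid (not part of the diff): a small standalone sketch of the per-container selection done in the loop above. requested_volumes is the capsule-wide list of {container_name: VolumeMapping} entries built by the API controller; the helper name below is hypothetical.

```python
def pick_container_volumes(container_name, requested_volumes):
    """Collect the VolumeMapping objects destined for one container."""
    return [entry[container_name]
            for entry in requested_volumes
            if entry.get(container_name)]

# e.g. pick_container_volumes(capsule.containers[k].name, requested_volumes)
# yields what _attach_volumes() and _do_container_create_base() receive.
```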
@@ -173,10 +173,11 @@ class API(rpc_service.API):
exact_match=exact_match)

def capsule_create(self, context, host, capsule,
requested_networks, limits):
requested_networks, requested_volumes, limits):
self._cast(host, 'capsule_create',
capsule=capsule,
requested_networks=requested_networks,
requested_volumes=requested_volumes,
limits=limits)

def capsule_delete(self, context, capsule):
@@ -161,19 +161,18 @@ class DockerDriver(driver.ContainerDriver):

host_config = {}
host_config['runtime'] = runtime
host_config['binds'] = binds
kwargs['volumes'] = [b['bind'] for b in binds.values()]
if sandbox_id:
host_config['network_mode'] = 'container:%s' % sandbox_id
# TODO(hongbin): Uncomment this after docker-py add support for
# container mode for pid namespace.
# host_config['pid_mode'] = 'container:%s' % sandbox_id
host_config['ipc_mode'] = 'container:%s' % sandbox_id
host_config['volumes_from'] = sandbox_id
else:
self._process_networking_config(
context, container, requested_networks, host_config,
kwargs, docker)
host_config['binds'] = binds
kwargs['volumes'] = [b['bind'] for b in binds.values()]
if container.auto_remove:
host_config['auto_remove'] = container.auto_remove
if container.memory is not None:
@@ -24,19 +24,24 @@ from zun.tests.unit.db import utils

class TestCapsuleController(api_base.FunctionalTest):
@patch('zun.compute.api.API.capsule_create')
def test_create_capsule(self, mock_capsule_create):
params = ('{"spec": {"kind": "capsule",'
'"spec": {"containers":'
'[{"environment": {"ROOT_PASSWORD": "foo0"}, '
'"image": "test", "labels": {"app": "web"}, '
'"image_driver": "docker", "resources": '
'{"allocation": {"cpu": 1, "memory": 1024}}}], '
'"volumes": [{"name": "volume1", '
'"image": "test", "drivers": "cinder", "volumeType": '
'"type1", "driverOptions": "options", '
'"size": "5GB"}]}, '
'"metadata": {"labels": {"foo0": "bar0", "foo1": "bar1"}, '
'"name": "capsule-example"}}}')
@patch('zun.network.neutron.NeutronAPI.get_available_network')
def test_create_capsule(self, mock_capsule_create,
mock_neutron_get_network):
params = ('{'
'"spec": '
'{"kind": "capsule",'
' "spec": {'
' "containers":'
' [{"environment": {"ROOT_PASSWORD": "foo0"}, '
' "image": "test", "labels": {"app": "web"}, '
' "image_driver": "docker", "resources": '
' {"allocation": {"cpu": 1, "memory": 1024}}'
' }]'
' }, '
' "metadata": {"labels": {"foo0": "bar0", "foo1": "bar1"},'
' "name": "capsule-example"}'
' }'
'}')
response = self.post('/capsules/',
params=params,
content_type='application/json')
@@ -54,21 +59,31 @@ class TestCapsuleController(api_base.FunctionalTest):
self.assertEqual(return_value["cpu"], expected_cpu)
self.assertEqual(202, response.status_int)
self.assertTrue(mock_capsule_create.called)
self.assertTrue(mock_neutron_get_network.called)

@patch('zun.compute.api.API.capsule_create')
def test_create_capsule_two_containers(self, mock_capsule_create):
params = ('{"spec": {"kind": "capsule",'
'"spec": {"containers":'
'[{"environment": {"ROOT_PASSWORD": "foo0"}, '
'"image": "test1", "labels": {"app0": "web0"}, '
'"image_driver": "docker", "resources": '
'{"allocation": {"cpu": 1, "memory": 1024}}}, '
'{"environment": {"ROOT_PASSWORD": "foo1"}, '
'"image": "test1", "labels": {"app1": "web1"}, '
'"image_driver": "docker", "resources": '
'{"allocation": {"cpu": 1, "memory": 1024}}}]}, '
'"metadata": {"labels": {"foo0": "bar0", "foo1": "bar1"}, '
'"name": "capsule-example"}}}')
@patch('zun.network.neutron.NeutronAPI.get_available_network')
def test_create_capsule_two_containers(self, mock_capsule_create,
mock_neutron_get_network):
params = ('{'
'"spec": '
'{"kind": "capsule",'
' "spec": {'
' "containers":'
' [{"environment": {"ROOT_PASSWORD": "foo0"}, '
' "image": "test", "labels": {"app": "web"}, '
' "image_driver": "docker", "resources": '
' {"allocation": {"cpu": 1, "memory": 1024}}},'
' {"environment": {"ROOT_PASSWORD": "foo1"}, '
' "image": "test1", "labels": {"app1": "web1"}, '
' "image_driver": "docker", "resources": '
' {"allocation": {"cpu": 1, "memory": 1024}}}'
' ]'
' }, '
' "metadata": {"labels": {"foo0": "bar0", "foo1": "bar1"},'
' "name": "capsule-example"}'
' }'
'}')
response = self.post('/capsules/',
params=params,
content_type='application/json')
@@ -88,6 +103,7 @@ class TestCapsuleController(api_base.FunctionalTest):
self.assertEqual(return_value["cpu"], expected_cpu)
self.assertEqual(202, response.status_int)
self.assertTrue(mock_capsule_create.called)
self.assertTrue(mock_neutron_get_network.called)

@patch('zun.compute.api.API.capsule_create')
@patch('zun.common.utils.check_capsule_template')
@@ -150,6 +166,177 @@ class TestCapsuleController(api_base.FunctionalTest):
params=params, content_type='application/json')
self.assertFalse(mock_capsule_create.called)

@patch('zun.volume.cinder_api.CinderAPI.ensure_volume_usable')
@patch('zun.volume.cinder_api.CinderAPI.create_volume')
@patch('zun.compute.api.API.capsule_create')
@patch('zun.network.neutron.NeutronAPI.get_available_network')
def test_create_capsule_with_create_new_volume(self, mock_capsule_create,
mock_neutron_get_network,
mock_create_volume,
mock_ensure_volume_usable):
fake_volume_id = 'fakevolid'
fake_volume = mock.Mock(id=fake_volume_id)
mock_create_volume.return_value = fake_volume
params = ('{'
'"spec":'
'{"kind": "capsule",'
' "spec":'
' {"containers":'
' [{"environment": {"ROOT_PASSWORD": "foo0"}, '
' "image": "test", "labels": {"app": "web"}, '
' "image_driver": "docker", "resources": '
' {"allocation": {"cpu": 1, "memory": 1024}},'
' "volumeMounts": [{"name": "volume1", '
' "mountPath": "/data1"}]'
' }'
' ],'
' "volumes":'
' [{"name": "volume1",'
' "cinder": {"size": 3, "autoRemove": "True"}'
' }]'
' }, '
' "metadata": {"labels": {"foo0": "bar0", "foo1": "bar1"},'
' "name": "capsule-example"}'
' }'
'}')
response = self.post('/capsules/',
params=params,
content_type='application/json')
return_value = response.json
expected_meta_name = "capsule-example"
expected_meta_labels = {"foo0": "bar0", "foo1": "bar1"}
expected_memory = '1024M'
expected_cpu = 1.0
expected_container_num = 2
self.assertEqual(len(return_value["containers_uuids"]),
expected_container_num)
self.assertEqual(return_value["meta_name"], expected_meta_name)
self.assertEqual(return_value["meta_labels"], expected_meta_labels)
self.assertEqual(return_value["memory"], expected_memory)
self.assertEqual(return_value["cpu"], expected_cpu)
self.assertEqual(202, response.status_int)
self.assertTrue(mock_capsule_create.called)
self.assertTrue(mock_neutron_get_network.called)
self.assertTrue(mock_create_volume.called)

@patch('zun.volume.cinder_api.CinderAPI.ensure_volume_usable')
@patch('zun.volume.cinder_api.CinderAPI.search_volume')
@patch('zun.compute.api.API.capsule_create')
@patch('zun.network.neutron.NeutronAPI.get_available_network')
def test_create_capsule_with_existed_volume(self, mock_capsule_create,
mock_neutron_get_network,
mock_search_volume,
mock_ensure_volume_usable):
fake_volume_id = 'fakevolid'
fake_volume = mock.Mock(id=fake_volume_id)
mock_search_volume.return_value = fake_volume
params = ('{'
'"spec":'
'{"kind": "capsule",'
' "spec":'
' {"containers":'
' [{"environment": {"ROOT_PASSWORD": "foo0"}, '
' "image": "test", "labels": {"app": "web"}, '
' "image_driver": "docker", "resources": '
' {"allocation": {"cpu": 1, "memory": 1024}},'
' "volumeMounts": [{"name": "volume1", '
' "mountPath": "/data1"}]'
' }'
' ],'
' "volumes":'
' [{"name": "volume1",'
' "cinder": {"volumeID": "fakevolid"}'
' }]'
' }, '
' "metadata": {"labels": {"foo0": "bar0", "foo1": "bar1"},'
' "name": "capsule-example"}'
' }'
'}')
response = self.post('/capsules/',
params=params,
content_type='application/json')
return_value = response.json
expected_meta_name = "capsule-example"
expected_meta_labels = {"foo0": "bar0", "foo1": "bar1"}
expected_memory = '1024M'
expected_cpu = 1.0
expected_container_num = 2
self.assertEqual(len(return_value["containers_uuids"]),
expected_container_num)
self.assertEqual(return_value["meta_name"], expected_meta_name)
self.assertEqual(return_value["meta_labels"], expected_meta_labels)
self.assertEqual(return_value["memory"], expected_memory)
self.assertEqual(return_value["cpu"], expected_cpu)
self.assertEqual(202, response.status_int)
self.assertTrue(mock_capsule_create.called)
self.assertTrue(mock_neutron_get_network.called)
self.assertTrue(mock_ensure_volume_usable.called)
self.assertTrue(mock_search_volume.called)

@patch('zun.volume.cinder_api.CinderAPI.create_volume')
@patch('zun.volume.cinder_api.CinderAPI.ensure_volume_usable')
@patch('zun.volume.cinder_api.CinderAPI.search_volume')
@patch('zun.compute.api.API.capsule_create')
@patch('zun.network.neutron.NeutronAPI.get_available_network')
def test_create_capsule_with_two_volumes(self, mock_capsule_create,
mock_neutron_get_network,
mock_search_volume,
mock_ensure_volume_usable,
mock_create_volume):
fake_volume_id1 = 'fakevolid1'
fake_volume = mock.Mock(id=fake_volume_id1)
mock_search_volume.return_value = fake_volume
fake_volume_id2 = 'fakevolid2'
fake_volume = mock.Mock(id=fake_volume_id2)
mock_create_volume.return_value = fake_volume
params = ('{'
'"spec":'
'{"kind": "capsule",'
' "spec":'
' {"containers":'
' [{"environment": {"ROOT_PASSWORD": "foo0"}, '
' "image": "test", "labels": {"app": "web"}, '
' "image_driver": "docker", "resources": '
' {"allocation": {"cpu": 1, "memory": 1024}},'
' "volumeMounts": [{"name": "volume1", '
' "mountPath": "/data1"},'
' {"name": "volume2", '
' "mountPath": "/data2"}]'
' }'
' ],'
' "volumes":'
' [{"name": "volume1",'
' "cinder": {"volumeID": "fakevolid1"}},'
' {"name": "volume2",'
' "cinder": {"size": 3, "autoRemove": "True"}'
' }]'
' }, '
' "metadata": {"labels": {"foo0": "bar0", "foo1": "bar1"},'
' "name": "capsule-example"}'
' }'
'}')
response = self.post('/capsules/',
params=params,
content_type='application/json')
return_value = response.json
expected_meta_name = "capsule-example"
expected_meta_labels = {"foo0": "bar0", "foo1": "bar1"}
expected_memory = '1024M'
expected_cpu = 1.0
expected_container_num = 2
self.assertEqual(len(return_value["containers_uuids"]),
expected_container_num)
self.assertEqual(return_value["meta_name"], expected_meta_name)
self.assertEqual(return_value["meta_labels"], expected_meta_labels)
self.assertEqual(return_value["memory"], expected_memory)
self.assertEqual(return_value["cpu"], expected_cpu)
self.assertEqual(202, response.status_int)
self.assertTrue(mock_capsule_create.called)
self.assertTrue(mock_neutron_get_network.called)
self.assertTrue(mock_ensure_volume_usable.called)
self.assertTrue(mock_search_volume.called)
self.assertTrue(mock_create_volume.called)

@patch('zun.compute.api.API.container_show')
@patch('zun.objects.Capsule.get_by_uuid')
@patch('zun.objects.Container.get_by_uuid')
@@ -143,25 +143,53 @@ class TestUtils(base.TestCase):
params = ({"kind": "capsule", "spec": {}})
utils.check_capsule_template(params)

def test_capsule_get_container_spec(self):
with self.assertRaisesRegex(
exception.InvalidCapsuleTemplate,
"Capsule need to have one container at least"):
params = ({"kind": "capsule", "spec": {"containers": []}})
utils.check_capsule_template(params)
params = ({"containers": []})
utils.capsule_get_container_spec(params)

with self.assertRaisesRegex(
exception.InvalidCapsuleTemplate, "Container "
"image is needed"):
params = ({"kind": "capsule",
"spec": {"containers": [{"labels": {"app": "web"}}]}})
utils.check_capsule_template(params)
params = ({"containers": [{"labels": {"app": "web"}}]})
utils.capsule_get_container_spec(params)

with self.assertRaisesRegex(
exception.InvalidCapsuleTemplate, "Container image is needed"):
params = ({"kind": "capsule",
"spec": {"containers": [{"image": "test1"},
{"environment": {"ROOT_PASSWORD": "foo0"}}]}})
utils.check_capsule_template(params)
params = ({"containers": [
{"image": "test1"},
{"environment": {"ROOT_PASSWORD": "foo0"}}]})
utils.capsule_get_container_spec(params)

def test_capsule_get_volume_spec(self):
with self.assertRaisesRegex(
exception.InvalidCapsuleTemplate,
"Volume name is needed"):
params = ({"volumes": [{"foo": "bar"}]})
utils.capsule_get_volume_spec(params)

with self.assertRaisesRegex(
exception.InvalidCapsuleTemplate, "Volume size is needed"):
params = ({"volumes": [{"name": "test",
"cinder": {"foo": "bar"}}]})
utils.capsule_get_volume_spec(params)

with self.assertRaisesRegex(
exception.InvalidCapsuleTemplate, "Volume size and uuid "
"could not be set at "
"the same time"):
params = ({"volumes": [{"name": "test",
"cinder": {"size": 3,
"volumeID": "fakevolid"}}]})
utils.capsule_get_volume_spec(params)

with self.assertRaisesRegex(
exception.InvalidCapsuleTemplate, "Zun now Only support "
"Cinder volume driver"):
params = ({"volumes": [{"name": "test", "other": {}}]})
utils.capsule_get_volume_spec(params)

@patch('zun.objects.Image.get_by_uuid')
def test_get_image(self, mock_image_get_by_uuid):