Add boot-from-volume support for nodes
For example, a cloud may get better performance from a Cinder volume than from the local compute drive. As a result, give nodepool the option to choose whether the server should boot from a volume or not.

Change-Id: I3faefe99096fef1fe28816ac0a4b28c05ff7f0ec
Depends-On: If58cd96b0b9ce4569120d60fbceb2c23b2f7641d
Signed-off-by: Paul Belanger <pabelanger@redhat.com>
parent 1a804c7859
commit 1d0990a1c1
@@ -509,6 +509,12 @@ Example configuration::

   **optional**

+  ``boot-from-volume`` (bool)
+    If given, the label for use in this pool will create a volume from the
+    image and boot the node from it.
+
+    Default: False
+
   ``key-name``
     If given, is the name of a keypair that will be used when booting each
     server.
@@ -516,3 +522,8 @@ Example configuration::
   ``console-log`` (default: False)
     On the failure of the ssh ready check, download the server console log to
     aid in debugging the problem.
+
+  ``volume-size``
+    When booting an image from volume, how big should the created volume be.
+
+    In gigabytes. Default 50.
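Taken together, a pool label that boots from a volume could be configured roughly as follows. This is a minimal sketch: the provider, pool, label, and image names are illustrative placeholders, and the option placement follows the example configuration above and the test fixture added later in this change.

providers:
  - name: example-cloud
    cloud: example-cloud
    pools:
      - name: main
        max-servers: 10
        labels:
          - name: example-label
            diskimage: example-image
            min-ram: 8192
            # Create a volume from the image and boot the node from it.
            boot-from-volume: True
            # Size of the created volume, in gigabytes (default: 50).
            volume-size: 80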
@@ -41,6 +41,8 @@ class ConfigValidator:
             'flavor-name': str,
             'key-name': str,
             'console-log': bool,
+            'boot-from-volume': bool,
+            'volume-size': int,
         }

         pool_label = v.All(pool_label_main,
@@ -271,6 +271,9 @@ def loadConfig(config_path):
                 pl.flavor_name = label.get('flavor-name', None)
                 pl.key_name = label.get('key-name')
                 pl.console_log = label.get('console-log', False)
+                pl.boot_from_volume = bool(label.get('boot-from-volume',
+                                                     False))
+                pl.volume_size = label.get('volume-size', 50)

                 top_label = newconfig.labels[pl.name]
                 top_label.pools.append(pp)
@@ -316,7 +316,9 @@ class NodeLauncher(threading.Thread, StatsReporter):
             config_drive=config_drive,
             nodepool_node_id=self._node.id,
             nodepool_image_name=image_name,
-            networks=self._pool.networks)
+            networks=self._pool.networks,
+            boot_from_volume=self._label.boot_from_volume,
+            volume_size=self._label.volume_size)

         self._node.external_id = server.id
         self._node.hostname = hostname
@@ -189,7 +189,7 @@ class ProviderManager(object):
                      flavor_name=None, min_ram=None,
                      az=None, key_name=None, config_drive=True,
                      nodepool_node_id=None, nodepool_image_name=None,
-                     networks=None):
+                     networks=None, boot_from_volume=False, volume_size=50):
         if not networks:
             networks = []
         if image_name:
@@ -201,6 +201,11 @@ class ProviderManager(object):
             image=image,
             flavor=flavor,
             config_drive=config_drive)
+        if boot_from_volume:
+            create_args['boot_from_volume'] = boot_from_volume
+            create_args['volume_size'] = volume_size
+            # NOTE(pabelanger): Always cleanup volumes when we delete a server.
+            create_args['terminate_volume'] = True
         if key_name:
             create_args['key_name'] = key_name
         if az:
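The create_args assembled above are handed to shade when the server is created, so the boot-from-volume behaviour ultimately relies on shade's create_server() parameters. Below is a minimal standalone sketch of the equivalent call; the cloud, image, flavor, and server names are placeholder assumptions, not values from this change.

# Hypothetical sketch of booting a node from a volume via shade directly,
# mirroring the create_args built in createServer() above.
import shade

# 'example-cloud' is an assumed clouds.yaml entry.
cloud = shade.openstack_cloud(cloud='example-cloud')

server = cloud.create_server(
    name='example-node',
    image='example-image',      # image the volume is created from
    flavor='example-flavor',
    config_drive=True,
    boot_from_volume=True,      # create a volume from the image and boot from it
    volume_size=50,             # volume size in gigabytes
    terminate_volume=True,      # delete the volume when the server is deleted
    wait=True)
print(server.id)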
@@ -36,6 +36,8 @@ providers:
       - name: trusty-2-node
         diskimage: trusty
         min-ram: 8192
+        boot-from-volume: True
+        volume-size: 100

   - name: cloud2
     cloud: chocolate-cloud
nodepool/tests/fixtures/node_boot_from_volume.yaml (new file, 47 lines)
@@ -0,0 +1,47 @@
+elements-dir: .
+images-dir: '{images_dir}'
+
+zookeeper-servers:
+  - host: {zookeeper_host}
+    port: {zookeeper_port}
+    chroot: {zookeeper_chroot}
+
+labels:
+  - name: fake-label
+    min-ready: 1
+
+providers:
+  - name: fake-provider
+    cloud: fake
+    region-name: fake-region
+    rate: 0.0001
+    diskimages:
+      - name: fake-image
+        meta:
+            key: value
+            key2: value
+    pools:
+      - name: main
+        max-servers: 96
+        availability-zones:
+          - az1
+        networks:
+          - net-name
+        labels:
+          - name: fake-label
+            diskimage: fake-image
+            min-ram: 8192
+            flavor-name: 'Fake'
+            boot-from-volume: True
+
+diskimages:
+  - name: fake-image
+    elements:
+      - fedora
+      - vm
+    release: 21
+    env-vars:
+      TMPDIR: /opt/dib_tmp
+      DIB_IMAGE_CACHE: /opt/dib_cache
+      DIB_CLOUD_IMAGES: http://download.fedoraproject.org/pub/fedora/linux/releases/test/21-Beta/Cloud/Images/x86_64/
+      BASE_IMAGE_FILE: Fedora-Cloud-Base-20141029-21_Beta.x86_64.qcow2
@@ -225,6 +225,19 @@ class TestLauncher(tests.DBTestCase):
         self.assertEqual(nodes[0].type, 'fake-label')
         self.assertNotEqual(nodes[0].host_keys, [])

+    def test_node_boot_from_volume(self):
+        """Test that an image and node are created from a volume"""
+        configfile = self.setup_config('node_boot_from_volume.yaml')
+        pool = self.useNodepool(configfile, watermark_sleep=1)
+        self._useBuilder(configfile)
+        pool.start()
+        self.waitForImage('fake-provider', 'fake-image')
+        nodes = self.waitForNodes('fake-label')
+
+        self.assertEqual(len(nodes), 1)
+        self.assertEqual(nodes[0].provider, 'fake-provider')
+        self.assertEqual(nodes[0].type, 'fake-label')
+
     def test_disabled_label(self):
         """Test that a node is not created with min-ready=0"""
         configfile = self.setup_config('node_disabled_label.yaml')
@@ -10,7 +10,7 @@ PrettyTable>=0.6,<0.8
 # shade has a looser requirement on six than nodepool, so install six first
 six>=1.7.0
 os-client-config>=1.2.0
-shade>=1.18.1
+shade>=1.21.0
 diskimage-builder>=2.0.0
 voluptuous
 kazoo