Toggle host-key-checking for openstack provider.labels
This adds the ability for a pool.label to override the pool.host-key-checking value, while having a label exist in the same pool. This is helpful because it is possible for one pool to mix network configurations, and some nodes may be missing a default gateway (making them unroutable by default). Change-Id: I934d42b8e48aedb0ebc03137b7305eb2af390fc7 Signed-off-by: Paul Belanger <pabelanger@redhat.com>
This commit is contained in:
parent
74f960848c
commit
3a5cabedcb
@ -926,6 +926,18 @@ Selecting the OpenStack driver adds the following options to the
|
|||||||
If given, the label for use in this pool will create a
|
If given, the label for use in this pool will create a
|
||||||
volume from the image and boot the node from it.
|
volume from the image and boot the node from it.
|
||||||
|
|
||||||
|
.. attr:: host-key-checking
|
||||||
|
:type: bool
|
||||||
|
:default: True
|
||||||
|
|
||||||
|
Specify custom behavior of validation of SSH host keys. When set to
|
||||||
|
False, nodepool-launcher will not ssh-keyscan nodes after they are
|
||||||
|
booted. This might be needed if nodepool-launcher and the nodes it
|
||||||
|
launches are on different networks. The default value is True.
|
||||||
|
|
||||||
|
.. note:: This value will override the value for
|
||||||
|
:attr:`providers.[openstack].pools.host-key-checking`.
|
||||||
|
|
||||||
.. attr:: networks
|
.. attr:: networks
|
||||||
:type: list
|
:type: list
|
||||||
|
|
||||||
|
@ -91,6 +91,7 @@ class ProviderLabel(ConfigValue):
|
|||||||
self.instance_properties = None
|
self.instance_properties = None
|
||||||
self.userdata = None
|
self.userdata = None
|
||||||
self.networks = []
|
self.networks = []
|
||||||
|
self.host_key_checking = True
|
||||||
# The ProviderPool object that owns this label.
|
# The ProviderPool object that owns this label.
|
||||||
self.pool = None
|
self.pool = None
|
||||||
|
|
||||||
@ -109,7 +110,8 @@ class ProviderLabel(ConfigValue):
|
|||||||
other.volume_size == self.volume_size and
|
other.volume_size == self.volume_size and
|
||||||
other.instance_properties == self.instance_properties and
|
other.instance_properties == self.instance_properties and
|
||||||
other.userdata == self.userdata and
|
other.userdata == self.userdata and
|
||||||
other.networks == self.networks)
|
other.networks == self.networks and
|
||||||
|
other.host_key_checking == self.host_key_checking)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
@ -211,6 +213,8 @@ class ProviderPool(ConfigPool):
|
|||||||
None)
|
None)
|
||||||
pl.userdata = label.get('userdata', None)
|
pl.userdata = label.get('userdata', None)
|
||||||
pl.networks = label.get('networks', self.networks)
|
pl.networks = label.get('networks', self.networks)
|
||||||
|
pl.host_key_checking = label.get(
|
||||||
|
'host-key-checking', self.host_key_checking)
|
||||||
|
|
||||||
top_label = full_config.labels[pl.name]
|
top_label = full_config.labels[pl.name]
|
||||||
top_label.pools.append(self)
|
top_label.pools.append(self)
|
||||||
@ -367,6 +371,7 @@ class OpenStackProviderConfig(ProviderConfig):
|
|||||||
'instance-properties': dict,
|
'instance-properties': dict,
|
||||||
'userdata': str,
|
'userdata': str,
|
||||||
'networks': [str],
|
'networks': [str],
|
||||||
|
'host-key-checking': bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
label_min_ram = v.Schema({v.Required('min-ram'): int}, extra=True)
|
label_min_ram = v.Schema({v.Required('min-ram'): int}, extra=True)
|
||||||
|
@ -217,7 +217,7 @@ class OpenStackNodeLauncher(NodeLauncher):
|
|||||||
|
|
||||||
# wait and scan the new node and record in ZooKeeper
|
# wait and scan the new node and record in ZooKeeper
|
||||||
host_keys = []
|
host_keys = []
|
||||||
if self.pool.host_key_checking:
|
if self.label.host_key_checking:
|
||||||
try:
|
try:
|
||||||
self.log.debug(
|
self.log.debug(
|
||||||
"Gathering host keys for node %s", self.node.id)
|
"Gathering host keys for node %s", self.node.id)
|
||||||
|
@ -11,6 +11,8 @@ zookeeper-servers:
|
|||||||
labels:
|
labels:
|
||||||
- name: fake-label
|
- name: fake-label
|
||||||
min-ready: 1
|
min-ready: 1
|
||||||
|
- name: fake-label2
|
||||||
|
min-ready: 1
|
||||||
|
|
||||||
providers:
|
providers:
|
||||||
- name: fake-provider
|
- name: fake-provider
|
||||||
@ -37,6 +39,22 @@ providers:
|
|||||||
min-ram: 8192
|
min-ram: 8192
|
||||||
flavor-name: 'Fake'
|
flavor-name: 'Fake'
|
||||||
|
|
||||||
|
- name: fake-provider2
|
||||||
|
cloud: fake
|
||||||
|
driver: fake
|
||||||
|
region-name: fake-region
|
||||||
|
rate: 0.0001
|
||||||
|
diskimages:
|
||||||
|
- name: fake-image
|
||||||
|
pools:
|
||||||
|
- name: main
|
||||||
|
max-servers: 96
|
||||||
|
labels:
|
||||||
|
- name: fake-label2
|
||||||
|
diskimage: fake-image
|
||||||
|
host-key-checking: False
|
||||||
|
min-ram: 8192
|
||||||
|
|
||||||
diskimages:
|
diskimages:
|
||||||
- name: fake-image
|
- name: fake-image
|
||||||
elements:
|
elements:
|
||||||
|
@ -514,21 +514,29 @@ class TestLauncher(tests.DBTestCase):
|
|||||||
{'key1': 'value1', 'key2': 'value2'})
|
{'key1': 'value1', 'key2': 'value2'})
|
||||||
|
|
||||||
def test_node_host_key_checking_false(self):
|
def test_node_host_key_checking_false(self):
|
||||||
"""Test that an image and node are created"""
|
"""Test that images and nodes are created"""
|
||||||
configfile = self.setup_config('node-host-key-checking.yaml')
|
configfile = self.setup_config('node-host-key-checking.yaml')
|
||||||
pool = self.useNodepool(configfile, watermark_sleep=1)
|
pool = self.useNodepool(configfile, watermark_sleep=1)
|
||||||
self.useBuilder(configfile)
|
self.useBuilder(configfile)
|
||||||
pool.start()
|
pool.start()
|
||||||
image = self.waitForImage('fake-provider', 'fake-image')
|
image = self.waitForImage('fake-provider', 'fake-image')
|
||||||
self.assertEqual(image.username, 'zuul')
|
self.assertEqual(image.username, 'zuul')
|
||||||
nodes = self.waitForNodes('fake-label')
|
label1_nodes = self.waitForNodes('fake-label')
|
||||||
|
label2_nodes = self.waitForNodes('fake-label2')
|
||||||
|
|
||||||
self.assertEqual(len(nodes), 1)
|
self.assertEqual(len(label1_nodes), 1)
|
||||||
self.assertEqual(nodes[0].provider, 'fake-provider')
|
self.assertEqual(label1_nodes[0].provider, 'fake-provider')
|
||||||
self.assertEqual(nodes[0].type, ['fake-label'])
|
self.assertEqual(label1_nodes[0].type, ['fake-label'])
|
||||||
self.assertEqual(nodes[0].username, 'zuul')
|
self.assertEqual(label1_nodes[0].username, 'zuul')
|
||||||
# We have no host_keys because host-key-checking is False.
|
# We have no host_keys because pool.host-key-checking is False.
|
||||||
self.assertEqual(nodes[0].host_keys, [])
|
self.assertEqual(label1_nodes[0].host_keys, [])
|
||||||
|
|
||||||
|
self.assertEqual(len(label2_nodes), 1)
|
||||||
|
self.assertEqual(label2_nodes[0].provider, 'fake-provider2')
|
||||||
|
self.assertEqual(label2_nodes[0].type, ['fake-label2'])
|
||||||
|
self.assertEqual(label2_nodes[0].username, 'zuul')
|
||||||
|
# We have no host_keys because label.host-key-checking is False.
|
||||||
|
self.assertEqual(label2_nodes[0].host_keys, [])
|
||||||
|
|
||||||
def test_multiple_launcher(self):
|
def test_multiple_launcher(self):
|
||||||
"""Test that an image and node are created with 2 launchers"""
|
"""Test that an image and node are created with 2 launchers"""
|
||||||
|
@ -0,0 +1,7 @@
|
|||||||
|
---
|
||||||
|
features:
|
||||||
|
- |
|
||||||
|
Provider labels for the OpenStack driver are now able to toggle
|
||||||
|
:attr:`providers.[openstack].pools.labels.host-key-checking`. This
|
||||||
|
overrides the host-key-checking value defined by
|
||||||
|
:attr:`providers.[openstack].pools.host-key-checking`.
|
Loading…
x
Reference in New Issue
Block a user