From b6107826f4a57b7c60f4d2744e63df28e6b0cca7 Mon Sep 17 00:00:00 2001 From: "Yuanbin.Chen" Date: Fri, 23 Feb 2018 16:31:11 +0800 Subject: [PATCH] Add cluster and receiver function test - cluster function test - policy attach/detach function test - receiver function test Add api microversion openstacksdk bug report link: https://storyboard.openstack.org/#!/story/2003146 Change-Id: Ibe30c1402c80061a60b9499681fe00771d72ab0e Signed-off-by: Yuanbin.Chen --- senlinclient/tests/functional/base.py | 27 +- .../tests/functional/test_cluster_policy.py | 109 ++++++++ .../tests/functional/test_clusters.py | 246 ++++++++++++++++++ .../tests/functional/test_receivers.py | 33 +++ 4 files changed, 414 insertions(+), 1 deletion(-) create mode 100644 senlinclient/tests/functional/test_cluster_policy.py diff --git a/senlinclient/tests/functional/base.py b/senlinclient/tests/functional/base.py index 8b13bf5d..c9a7d103 100644 --- a/senlinclient/tests/functional/base.py +++ b/senlinclient/tests/functional/base.py @@ -113,7 +113,7 @@ class OpenStackClientTestBase(base.ClientTestBase): % (check_type, name, timeout)) raise tempest_lib_exc.TimeoutException(message) - def policy_create(self, name, policy): + def policy_create(self, name, policy='deletion_policy.yaml'): pf = self._get_policy_path(policy) cmd = ('cluster policy create --spec-file %s %s' % (pf, name)) @@ -149,3 +149,28 @@ class OpenStackClientTestBase(base.ClientTestBase): cmd = ('cluster node delete %s --force' % name_or_id) self.openstack(cmd) self.wait_for_delete(name_or_id, 'node', 120) + + def cluster_create(self, profile, name, desired_capacity=0): + cmd = ('cluster create --profile %s --desired-capacity %d %s' + % (profile, desired_capacity, name)) + cluster_raw = self.openstack(cmd) + result = self.show_to_dict(cluster_raw) + self.wait_for_status(name, 'ACTIVE', 'cluster', 120) + return result + + def cluster_delete(self, name_or_id): + cmd = ('cluster delete %s --force' % name_or_id) + self.openstack(cmd) + 
self.wait_for_delete(name_or_id, 'cluster', 120) + + def receiver_create(self, name, cluster, action='CLUSTER_SCALE_OUT', + rt='webhook'): + cmd = ('cluster receiver create --cluster %s --action %s --type %s ' + '%s' % (cluster, action, rt, name)) + receiver_raw = self.openstack(cmd) + result = self.show_to_dict(receiver_raw) + return result + + def receiver_delete(self, name_or_id): + cmd = ('cluster receiver delete %s --force' % name_or_id) + self.openstack(cmd) diff --git a/senlinclient/tests/functional/test_cluster_policy.py b/senlinclient/tests/functional/test_cluster_policy.py new file mode 100644 index 00000000..b99d3ee2 --- /dev/null +++ b/senlinclient/tests/functional/test_cluster_policy.py @@ -0,0 +1,109 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from senlinclient.tests.functional import base + + +class ClusterPolicyTest(base.OpenStackClientTestBase): + """Test cluster for policy""" + + def test_cluster_policy_attach_and_detach(self): + name = self.name_generate() + po = self.policy_create(name) + self.addCleanup(self.policy_delete, po['id']) + pf = self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name) + self.addCleanup(self.cluster_delete, cluster['id']) + + cp_raw = self.openstack('cluster policy binding list %s' + % cluster['id']) + cp_data = self.show_to_dict(cp_raw) + self.assertEqual({}, cp_data) + + # Attach policy to cluster + cmd = ('cluster policy attach --policy %s %s' % (po['id'], + cluster['id'])) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + cmd = ('cluster policy binding show --policy %s %s' % + (po['id'], cluster['id'])) + policy_raw = self.openstack(cmd) + policy_data = self.show_to_dict(policy_raw) + self.assertEqual(po['name'], policy_data['policy_name']) + self.assertEqual(cluster['name'], policy_data['cluster_name']) + self.assertTrue(policy_data['is_enabled']) + + # Detach policy from cluster + cmd = ('cluster policy detach --policy %s %s' % (po['id'], + cluster['id'])) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + cp_raw = self.openstack('cluster policy binding list %s' + % cluster['id']) + cp_data = self.show_to_dict(cp_raw) + self.assertEqual({}, cp_data) + + def test_cluster_policy_list(self): + name = self.name_generate() + po = self.policy_create(name) + self.addCleanup(self.policy_delete, po['id']) + pf = self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name) + self.addCleanup(self.cluster_delete, cluster['id']) + + cmd = ('cluster policy attach --policy %s %s' % (po['id'], + cluster['id'])) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 
'ACTIVE', 'cluster', 120) + + # List cluster policy binding + cmd = ('cluster policy binding list --filters policy_name=%s %s' + % (po['name'], cluster['id'])) + result = self.openstack(cmd) + binding_list = self.parser.listing(result) + self.assertTableStruct(binding_list, ['policy_id', 'policy_name', + 'policy_type', 'is_enabled']) + cmd = ('cluster policy detach --policy %s %s' % (po['id'], + cluster['id'])) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + + def test_cluster_policy_update(self): + name = self.name_generate() + po = self.policy_create(name) + self.addCleanup(self.policy_delete, po['id']) + pf = self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name) + self.addCleanup(self.cluster_delete, cluster['id']) + + cmd = ('cluster policy attach --policy %s %s' % (po['id'], + cluster['id'])) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + + # Update cluster policy binding + cmd = ('cluster policy binding update --policy %s --enabled false %s' + % (po['id'], cluster['id'])) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + + cp_update = self.openstack('cluster policy binding show --policy %s %s' + % (po['id'], cluster['id'])) + cp_update_data = self.show_to_dict(cp_update) + self.assertFalse(cp_update_data['is_enabled'].isupper()) + cmd = ('cluster policy detach --policy %s %s' % (po['id'], + cluster['id'])) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) diff --git a/senlinclient/tests/functional/test_clusters.py b/senlinclient/tests/functional/test_clusters.py index b49ae963..97502e26 100644 --- a/senlinclient/tests/functional/test_clusters.py +++ b/senlinclient/tests/functional/test_clusters.py @@ -9,6 +9,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. +from tempest.lib import decorators from senlinclient.tests.functional import base @@ -39,3 +40,248 @@ class ClusterTest(base.OpenStackClientTestBase): def test_cluster_limit(self): self.openstack('cluster list --limit 1') + + def test_cluster_create(self): + name = self.name_generate() + pf = self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name, 1) + self.addCleanup(self.cluster_delete, cluster['id']) + cluster_raw = self.openstack('cluster show %s' % name) + cluster_data = self.show_to_dict(cluster_raw) + self.assertEqual(cluster_data['name'], name) + self.assertEqual(cluster_data['status'], 'ACTIVE') + self.assertEqual(cluster_data['desired_capacity'], '1') + + def test_cluster_update(self): + old_name = self.name_generate() + old_pf = self.profile_create(old_name) + self.addCleanup(self.profile_delete, old_pf['id']) + new_name = self.name_generate() + new_pf = self.profile_create(new_name) + self.addCleanup(self.profile_delete, new_pf['id']) + cluster = self.cluster_create(old_pf['id'], old_name, 1) + self.addCleanup(self.cluster_delete, cluster['id']) + self.assertEqual(cluster['name'], old_name) + + # cluster update + cmd = ('cluster update --name %s --profile %s --timeout 300 %s' + % (old_name, new_pf['id'], cluster['id'])) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + + cluster_raw = self.openstack('cluster show %s' % cluster['id']) + cluster_data = self.show_to_dict(cluster_raw) + node_raw = self.openstack('cluster node show %s' % + cluster_data['node_ids']) + node_data = self.show_to_dict(node_raw) + + # if not profile-only, change all profile + self.assertEqual(cluster['name'], cluster_data['name']) + self.assertEqual(cluster_data['profile_id'], new_pf['id']) + self.assertEqual(cluster_data['timeout'], '300') + self.assertEqual(new_pf['name'], 
node_data['profile_name']) + + # (chenyb4) cluster update profile only need api microversion support, + # skip cluster update profile only before openstacksdk support + # api microversion. + @decorators.skip_because(bug="2003146") + def test_cluster_update_profile_only(self): + old_name = self.name_generate() + old_pf = self.profile_create(old_name) + self.addCleanup(self.profile_delete, old_pf['id']) + new_name = self.name_generate() + new_pf = self.profile_create(new_name) + self.addCleanup(self.profile_delete, new_pf['id']) + cluster = self.cluster_create(old_pf['id'], old_name, 1) + self.addCleanup(self.cluster_delete, cluster['id']) + self.assertEqual(cluster['name'], old_name) + + cmd = ('cluster update --name %s --profile %s --profile-only true' + ' --timeout 300 %s' % (new_name, new_pf['id'], cluster['id'])) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + + cluster_raw = self.openstack('cluster show %s' % cluster['id']) + cluster_data = self.show_to_dict(cluster_raw) + node_raw = self.openstack('cluster node show %s' % + cluster_data['node_ids']) + node_data = self.show_to_dict(node_raw) + + # if profile-only true, not change exist node profile + self.assertNotEqual(cluster['name'], cluster_data['name']) + self.assertNotEqual(cluster_data['profile_id'], cluster['profile_id']) + self.assertEqual(cluster_data['profile_id'], new_pf['id']) + self.assertEqual(cluster_data['timeout'], '300') + self.assertNotEqual(new_name, node_data['profile_name']) + + def test_cluster_show(self): + name = self.name_generate() + pf = self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name) + self.addCleanup(self.cluster_delete, cluster['id']) + cluster_raw = self.openstack('cluster show %s' % name) + cluster_data = self.show_to_dict(cluster_raw) + self.assertIn('node_ids', cluster_data) + self.assertIn('timeout', cluster_data) + + def test_cluster_expand_and_shrink(self): + 
name = self.name_generate() + pf = self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name) + self.addCleanup(self.cluster_delete, cluster['id']) + cluster_raw = self.openstack('cluster show %s' % name) + cluster_data = self.show_to_dict(cluster_raw) + + # cluster expand + self.openstack('cluster expand --count 1 %s' % name) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + expand_raw = self.openstack('cluster show %s' % name) + expand_data = self.show_to_dict(expand_raw) + self.assertNotEqual(cluster_data['desired_capacity'], + expand_data['desired_capacity']) + self.assertEqual(expand_data['desired_capacity'], '1') + + # cluster shrink + self.openstack('cluster shrink --count 1 %s' % name) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + shrink_raw = self.openstack('cluster show %s' % name) + shrink_data = self.show_to_dict(shrink_raw) + self.assertNotEqual(shrink_data['desired_capacity'], + expand_data['desired_capacity']) + self.assertEqual(cluster_data['desired_capacity'], + shrink_data['desired_capacity']) + + # NOTE(chenyb4): Since functional tests only focus on the client/server + # interaction without involving other OpenStack services, it is not + # possible to mock a cluster failure and then test if the check logic + # works. Such tests would be left to integration tests instead. 
+ def test_cluster_check(self): + name = self.name_generate() + pf = self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name, 1) + self.addCleanup(self.cluster_delete, cluster['id']) + self.openstack('cluster check %s' % cluster['id']) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + check_raw = self.openstack('cluster show %s' % name) + check_data = self.show_to_dict(check_raw) + self.assertIn('CLUSTER_CHECK', check_data['status_reason']) + cluster_status = ['ACTIVE', 'WARNING'] + self.assertIn(check_data['status'], cluster_status) + + # NOTE(chenyb4): An end-to-end test of the cluster recover operation needs + # to be done with other OpenStack services involved, thus out of scope + # for functional tests. Such tests would be left to integration tests + # instead. + def test_cluster_recover(self): + name = self.name_generate() + pf = self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name, 1) + self.addCleanup(self.cluster_delete, cluster['id']) + cmd = ('cluster recover --check true %s' % cluster['id']) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + recover_raw = self.openstack('cluster show %s' % name) + recover_data = self.show_to_dict(recover_raw) + self.assertIn('CLUSTER_RECOVER', recover_data['status_reason']) + self.assertEqual('ACTIVE', recover_data['status']) + + def test_cluster_resize(self): + name = self.name_generate() + pf = self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name) + self.addCleanup(self.cluster_delete, cluster['id']) + cluster_raw = self.openstack('cluster show %s' % name) + cluster_data = self.show_to_dict(cluster_raw) + self.assertEqual(cluster_data['desired_capacity'], '0') + self.assertEqual(cluster_data['max_size'], '-1') + self.assertEqual(cluster_data['min_size'], '0') + cmd = 
('cluster resize --max-size 5 --min-size 1 --adjustment 2 %s' + % cluster['id']) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + resize_raw = self.openstack('cluster show %s' % name) + resize_data = self.show_to_dict(resize_raw) + self.assertEqual(resize_data['desired_capacity'], '2') + self.assertEqual(resize_data['max_size'], '5') + self.assertEqual(resize_data['min_size'], '1') + + def test_cluster_members_list(self): + name = self.name_generate() + pf = self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name) + self.addCleanup(self.cluster_delete, cluster['id']) + result = self.openstack('cluster members list --full-id %s' + % cluster['id']) + members_list = self.parser.listing(result) + self.assertTableStruct(members_list, ['id', 'name', 'index', + 'status', 'physical_id', + 'created_at']) + + def test_cluster_members_add_and_del(self): + name = self.name_generate() + pf = self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name) + self.addCleanup(self.cluster_delete, cluster['name']) + node = self.node_create(pf['id'], name) + self.addCleanup(self.node_delete, node['id']) + cluster_raw = self.openstack('cluster show %s' % name) + cluster_data = self.show_to_dict(cluster_raw) + self.assertEqual('', cluster_data['node_ids']) + + # Add exist node to cluster + cmd = ('cluster members add --nodes %s %s' % (node['name'], + cluster['id'])) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + + mem_ad_raw = self.openstack('cluster show %s' % name) + mem_ad_data = self.show_to_dict(mem_ad_raw) + self.assertNotEqual('', mem_ad_data['node_ids']) + self.assertIn(node['id'], mem_ad_data['node_ids']) + + # Delete node from cluster + cmd = ('cluster members del --nodes %s %s' % (node['name'], + cluster['id'])) + self.openstack(cmd) + self.wait_for_status(cluster['id'], 
'ACTIVE', 'cluster', 120) + mem_del_raw = self.openstack('cluster show %s' % name) + mem_del_data = self.show_to_dict(mem_del_raw) + self.assertEqual('', mem_del_data['node_ids']) + self.assertNotIn(node['id'], mem_del_data['node_ids']) + + # (chenyb4) cluster members replace need api microversion support, + # skip cluster members replace before openstacksdk support api microversion + @decorators.skip_because(bug="2003146") + def test_cluster_members_replace(self): + name = self.name_generate() + pf = self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name, 1) + self.addCleanup(self.cluster_delete, cluster['id']) + cluster_raw = self.openstack('cluster show %s' % name) + cluster_data = self.show_to_dict(cluster_raw) + + # Create replace node + new_node = self.node_create(pf['id'], name) + self.addCleanup(self.node_delete, new_node['id']) + self.assertNotIn(new_node['id'], cluster_data['node_ids']) + + # Cluster node replace + old_node = cluster_data['node_ids'] + self.addCleanup(self.node_delete, old_node) + cmd = ('cluster members replace --nodes %s=%s %s' + % (old_node, new_node['id'], cluster['id'])) + self.openstack(cmd, flags='--debug') + self.wait_for_status(cluster['id'], 'ACTIVE', 'cluster', 120) + replace_raw = self.openstack('cluster show %s' % name) + replace_data = self.show_to_dict(replace_raw) + self.assertIn(new_node['id'], replace_data['node_ids']) + self.assertNotIn(old_node, replace_data['node_ids']) diff --git a/senlinclient/tests/functional/test_receivers.py b/senlinclient/tests/functional/test_receivers.py index 423d6630..27df7015 100644 --- a/senlinclient/tests/functional/test_receivers.py +++ b/senlinclient/tests/functional/test_receivers.py @@ -22,3 +22,36 @@ class ReceiverTest(base.OpenStackClientTestBase): self.assertTableStruct(receiver_list, ['id', 'name', 'type', 'cluster_id', 'action', 'created_at']) + + def test_receiver_create(self): + name = self.name_generate() + pf 
= self.profile_create(name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], name) + self.addCleanup(self.cluster_delete, cluster['id']) + receiver = self.receiver_create(name, cluster['id']) + self.addCleanup(self.receiver_delete, receiver['id']) + self.assertEqual(receiver['name'], name) + self.assertEqual(receiver['type'], 'webhook') + self.assertEqual(receiver['action'], 'CLUSTER_SCALE_OUT') + + def test_receiver_update(self): + old_name = self.name_generate() + pf = self.profile_create(old_name) + self.addCleanup(self.profile_delete, pf['id']) + cluster = self.cluster_create(pf['id'], old_name) + self.addCleanup(self.cluster_delete, cluster['id']) + receiver = self.receiver_create(old_name, cluster['id']) + self.addCleanup(self.receiver_delete, receiver['id']) + new_name = self.name_generate() + + cmd = ('cluster receiver update --name %s --params count=2 ' + '--action CLUSTER_SCALE_IN %s' % (new_name, receiver['id'])) + self.openstack(cmd) + receiver_raw = self.openstack('cluster receiver show %s' + % receiver['id']) + receiver_data = self.show_to_dict(receiver_raw) + self.assertNotEqual(receiver['name'], receiver_data['name']) + self.assertEqual(receiver_data['name'], new_name) + self.assertNotEqual(receiver['action'], receiver_data['action']) + self.assertEqual(receiver_data['action'], 'CLUSTER_SCALE_IN')