From 32c000eebafcb58a656fb5a2e2e1195b6801fcfb Mon Sep 17 00:00:00 2001
From: Ruslan Aliev <raliev@mirantis.com>
Date: Tue, 9 Feb 2021 20:22:48 -0600
Subject: [PATCH] Set proper role for worker node

Since it's not possible to self-assign node labels such as role,
it must be done after the bootstrap process. [1]

[1] https://cluster-api.sigs.k8s.io/user/troubleshooting.html#labeling-nodes-with-reserved-labels-such-as-node-rolekubernetesio-fails-with-kubeadm-error-during-bootstrap
Signed-off-by: Ruslan Aliev <raliev@mirantis.com>

Change-Id: I5163bcd4ddc8c74900904d8032b215406b4c513d
---
 .../target/workers/provision/kubeadmconfigtemplate.yaml  | 2 +-
 tools/deployment/34_deploy_worker_node.sh                | 9 +++++++--
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/manifests/site/test-site/target/workers/provision/kubeadmconfigtemplate.yaml b/manifests/site/test-site/target/workers/provision/kubeadmconfigtemplate.yaml
index 20b070236..ecf8dbd8c 100644
--- a/manifests/site/test-site/target/workers/provision/kubeadmconfigtemplate.yaml
+++ b/manifests/site/test-site/target/workers/provision/kubeadmconfigtemplate.yaml
@@ -9,7 +9,7 @@ spec:
         nodeRegistration:
           name: '{{ ds.meta_data.name }}'
           kubeletExtraArgs:
-            node-labels: 'metal3.io/uuid={{ ds.meta_data.uuid }},node-type=worker'
+            node-labels: 'metal3.io/uuid={{ ds.meta_data.uuid }}'
             provider-id: 'metal3://{{ ds.meta_data.uuid }}'
             feature-gates: "IPv6DualStack=true"
       ntp:
diff --git a/tools/deployment/34_deploy_worker_node.sh b/tools/deployment/34_deploy_worker_node.sh
index 39778723b..594f71f22 100755
--- a/tools/deployment/34_deploy_worker_node.sh
+++ b/tools/deployment/34_deploy_worker_node.sh
@@ -18,6 +18,7 @@ set -xe
 export TIMEOUT=${TIMEOUT:-3600}
 export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
 export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
+WORKER_NODE="node03"
 
 echo "Stop ephemeral node"
 sudo virsh destroy air-ephemeral
@@ -25,7 +26,11 @@ sudo virsh destroy air-ephemeral
 node_timeout () {
     end=$(($(date +%s) + $TIMEOUT))
     while true; do
-        if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get $1 node03 | grep -qw $2) ; then
+        if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get $1 $WORKER_NODE | grep -qw $2) ; then
+            if [ "$1" = "node" ]; then
+              kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT label nodes $WORKER_NODE node-role.kubernetes.io/worker=""
+            fi
+
             echo -e "\nGet $1 status"
             kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get $1
             break
@@ -42,7 +47,7 @@ node_timeout () {
 }
 
 echo "Deploy worker node"
-airshipctl phase run  workers-target --debug
+airshipctl phase run workers-target --debug
 
 echo "Waiting $TIMEOUT seconds for bmh to be in ready state."
 node_timeout bmh ready