From 3fbf86504879fb870556f21c4c7a55503cd5d665 Mon Sep 17 00:00:00 2001
From: Kostiantyn Kalynovskyi <kkalinovskiy@gmail.com>
Date: Wed, 17 Mar 2021 16:26:56 +0000
Subject: [PATCH] Fix CAPD deployment

This commit fixes the CAPD deployment and removes the redundant
scripts that check certificate expiration for the CAPD site.

These scripts must be tested separately, outside the CAPD pipeline.
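
The updated scripts no longer write per-cluster kubeconfig files under
/tmp; they use the merged kubeconfig produced by
"airshipctl cluster get-kubeconfig" and select the ephemeral or target
cluster through kubectl contexts. A minimal sketch of that flow,
assuming the default context names exported by the scripts below:

    # Sketch only; ephemeral-cluster and target-cluster are the default
    # context names set in the deployment scripts.
    export KUBECONFIG="$HOME/.airship/kubeconfig"

    # Merge the target cluster credentials into the single kubeconfig.
    airshipctl cluster get-kubeconfig > ~/.airship/kubeconfig-tmp
    mv ~/.airship/kubeconfig-tmp "${KUBECONFIG}"

    # Address each cluster by context instead of a separate file.
    kubectl --kubeconfig "${KUBECONFIG}" --context ephemeral-cluster get machines
    kubectl --kubeconfig "${KUBECONFIG}" --context target-cluster get nodes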

Related-To: #482
Closes: #482
Change-Id: I60ffd76a4f3f08bd7bd198a0c2b15483dfbdd6a6
---
 .../provider_common/30_deploy_controlplane.sh | 52 +++++--------------
 .../32_cluster_init_target_node.sh            |  6 +--
 .../33_cluster_move_target_node.sh            | 22 ++++----
 .../provider_common/34_deploy_worker_node.sh  | 12 +----
 zuul.d/jobs.yaml                              |  8 ++-
 5 files changed, 32 insertions(+), 68 deletions(-)

diff --git a/tools/deployment/provider_common/30_deploy_controlplane.sh b/tools/deployment/provider_common/30_deploy_controlplane.sh
index 430549903..306f7ddb7 100755
--- a/tools/deployment/provider_common/30_deploy_controlplane.sh
+++ b/tools/deployment/provider_common/30_deploy_controlplane.sh
@@ -13,58 +13,34 @@
 # limitations under the License.
 
 # Example Usage
-# CONTROLPLANE_COUNT=1 \
-# SITE=docker-test-site \
 # ./tools/deployment/provider_common/30_deploy_controlplane.sh
 
-export AIRSHIP_SRC=${AIRSHIP_SRC:-"/tmp/airship"}
+set -xe
+
 export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
-export CONTROLPLANE_COUNT=${CONTROLPLANE_COUNT:-"1"}
-export SITE=${SITE:-"docker-test-site"}
-export TARGET_CLUSTER_NAME=${TARGET_CLUSTER_NAME:-"target-cluster"}
-
-# Adjust Control Plane Count (default 1)
-# No. of control plane can be changed using
-# CONTROLPLANE_COUNT=<replicas> tools/deployment/docker/30_deploy_controlplane.sh
-
-sed -i "/value.*/s//value\": $CONTROLPLANE_COUNT }/g" \
-${AIRSHIP_SRC}/airshipctl/manifests/site/${SITE}/ephemeral/controlplane/machine_count.json
+export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
+export KUBECONFIG_EPHEMERAL_CONTEXT=${KUBECONFIG_EPHEMERAL_CONTEXT:-"ephemeral-cluster"}
 
 echo "create control plane"
-airshipctl phase run controlplane-ephemeral --debug --kubeconfig ${KUBECONFIG} --wait-timeout 1000s
+airshipctl phase run controlplane-ephemeral --debug --wait-timeout 1000s
 
-TARGET_KUBECONFIG=""
-TARGET_KUBECONFIG=$(kubectl --kubeconfig "${KUBECONFIG}" --namespace=default get secret/"${TARGET_CLUSTER_NAME}"-kubeconfig -o jsonpath={.data.value}  || true)
+airshipctl cluster get-kubeconfig > ~/.airship/kubeconfig-tmp
 
-if [[ -z "$TARGET_KUBECONFIG" ]]; then
-  echo "Error: Could not get kubeconfig from secret."
-  exit 1
-fi
-
-echo "Generate kubeconfig"
-echo ${TARGET_KUBECONFIG} | base64 -d > /tmp/${TARGET_CLUSTER_NAME}.kubeconfig
-echo "Generate kubeconfig: /tmp/${TARGET_CLUSTER_NAME}.kubeconfig"
-
-echo "add context target-cluster"
-kubectl config set-context ${TARGET_CLUSTER_NAME} --user ${TARGET_CLUSTER_NAME}-admin --cluster ${TARGET_CLUSTER_NAME} \
---kubeconfig "/tmp/${TARGET_CLUSTER_NAME}.kubeconfig"
+mv ~/.airship/kubeconfig-tmp "${KUBECONFIG}"
 
 echo "apply cni as a part of initinfra-networking"
-airshipctl phase run initinfra-networking-target --debug --kubeconfig "/tmp/${TARGET_CLUSTER_NAME}.kubeconfig"
+airshipctl phase run initinfra-networking-target --debug
 
 echo "Check nodes status"
-kubectl --kubeconfig /tmp/"${TARGET_CLUSTER_NAME}".kubeconfig wait --for=condition=Ready nodes --all --timeout 4000s
-kubectl get nodes --kubeconfig /tmp/"${TARGET_CLUSTER_NAME}".kubeconfig
+kubectl --kubeconfig "${KUBECONFIG}" --context "${KUBECONFIG_TARGET_CONTEXT}" wait --for=condition=Ready nodes --all --timeout 4000s
+kubectl get nodes --kubeconfig "${KUBECONFIG}" --context "${KUBECONFIG_TARGET_CONTEXT}"
 
 echo "Waiting for  pods to come up"
-kubectl --kubeconfig /tmp/${TARGET_CLUSTER_NAME}.kubeconfig  wait --for=condition=ready pods --all --timeout=4000s -A
-kubectl --kubeconfig /tmp/${TARGET_CLUSTER_NAME}.kubeconfig get pods -A
+kubectl --kubeconfig "${KUBECONFIG}" --context "${KUBECONFIG_TARGET_CONTEXT}" wait --for=condition=ready pods --all --timeout=4000s -A
+kubectl --kubeconfig "${KUBECONFIG}" --context "${KUBECONFIG_TARGET_CONTEXT}" get pods -A
 
 echo "Check machine status"
-kubectl get machines --kubeconfig ${KUBECONFIG}
+kubectl get machines --kubeconfig ${KUBECONFIG} --context "${KUBECONFIG_EPHEMERAL_CONTEXT}"
 
 echo "Get cluster state for target workload cluster "
-kubectl --kubeconfig ${KUBECONFIG} get cluster
-
-echo "Target Cluster Kubeconfig"
-echo "/tmp/${TARGET_CLUSTER_NAME}.kubeconfig"
+kubectl --kubeconfig ${KUBECONFIG} --context "${KUBECONFIG_EPHEMERAL_CONTEXT}" get cluster
diff --git a/tools/deployment/provider_common/32_cluster_init_target_node.sh b/tools/deployment/provider_common/32_cluster_init_target_node.sh
index ab9b8d213..d49f9816b 100755
--- a/tools/deployment/provider_common/32_cluster_init_target_node.sh
+++ b/tools/deployment/provider_common/32_cluster_init_target_node.sh
@@ -22,17 +22,17 @@ export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
 export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
 
 # Get control plane node
-CONTROL_PLANE_NODES=( $(kubectl --kubeconfig $KUBECONFIG get --no-headers=true nodes \
+CONTROL_PLANE_NODES=( $(kubectl --context $KUBECONFIG_TARGET_CONTEXT --kubeconfig $KUBECONFIG get --no-headers=true nodes \
 | grep cluster-control-plane | awk '{print $1}') )
 
 # Remove noschedule taint to prevent cluster init from timing out
 for i in "${CONTROL_PLANE_NODES}"; do
   echo untainting node $i
-  kubectl taint node $i node-role.kubernetes.io/master- --kubeconfig $KUBECONFIG --request-timeout 10s
+  kubectl taint node $i node-role.kubernetes.io/master- --context $KUBECONFIG_TARGET_CONTEXT --kubeconfig $KUBECONFIG --request-timeout 10s
 done
 
 echo "Deploy CAPI components to target cluster"
-airshipctl phase run clusterctl-init-target --debug --kubeconfig "$KUBECONFIG"
+airshipctl phase run clusterctl-init-target --debug
 
 echo "Waiting for pods to be ready"
 kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=600s
diff --git a/tools/deployment/provider_common/33_cluster_move_target_node.sh b/tools/deployment/provider_common/33_cluster_move_target_node.sh
index eb584b262..a919d3707 100755
--- a/tools/deployment/provider_common/33_cluster_move_target_node.sh
+++ b/tools/deployment/provider_common/33_cluster_move_target_node.sh
@@ -18,18 +18,18 @@ set -xe
 export TIMEOUT=${TIMEOUT:-3600}
 export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
 export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
-export TARGET_KUBECONFIG="/tmp/target-cluster.kubeconfig"
+export KUBECONFIG_EPHEMERAL_CONTEXT=${KUBECONFIG_EPHEMERAL_CONTEXT:-"ephemeral-cluster"}
 
 echo "Waiting for machines to come up"
-kubectl --kubeconfig ${KUBECONFIG} wait --for=condition=Ready machines --all --timeout 4000s
+kubectl --kubeconfig ${KUBECONFIG} --context $KUBECONFIG_EPHEMERAL_CONTEXT wait --for=condition=Ready machines --all --timeout 4000s
 
 #add wait condition
 end=$(($(date +%s) + $TIMEOUT))
 echo "Waiting $TIMEOUT seconds for Machine to be Running."
 while true; do
-    if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG get machines -o json | jq '.items[0].status.phase' | grep -q "Running") ; then
+    if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT get machines -o json | jq '.items[0].status.phase' | grep -q "Running") ; then
         echo -e "\nMachine is Running"
-        kubectl --kubeconfig $KUBECONFIG get machines
+        kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_EPHEMERAL_CONTEXT get machines
         break
     else
         now=$(date +%s)
@@ -42,22 +42,20 @@ while true; do
     fi
 done
 
-echo "Move Cluster Object to Target Cluster"
-KUBECONFIG=$KUBECONFIG:$TARGET_KUBECONFIG kubectl config view --merge --flatten > "/tmp/merged_target_ephemeral.kubeconfig"
-airshipctl phase run clusterctl-move --kubeconfig "/tmp/merged_target_ephemeral.kubeconfig"
+airshipctl phase run clusterctl-move
 
 echo "Waiting for pods to be ready"
-kubectl --kubeconfig $TARGET_KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=3000s
-kubectl --kubeconfig $TARGET_KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get pods --all-namespaces
+kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=3000s
+kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get pods --all-namespaces
 
 #Wait till crds are created
 end=$(($(date +%s) + $TIMEOUT))
 echo "Waiting $TIMEOUT seconds for crds to be created."
 while true; do
-    if (kubectl --request-timeout 20s --kubeconfig $TARGET_KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get cluster target-cluster -o json | jq '.status.controlPlaneReady' | grep -q true) ; then
+    if (kubectl --request-timeout 20s --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get cluster target-cluster -o json | jq '.status.controlPlaneReady' | grep -q true) ; then
         echo -e "\nGet CRD status"
-        kubectl --kubeconfig $TARGET_KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get machines
-        kubectl --kubeconfig $TARGET_KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get clusters
+        kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get machines
+        kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get clusters
         break
     else
         now=$(date +%s)
diff --git a/tools/deployment/provider_common/34_deploy_worker_node.sh b/tools/deployment/provider_common/34_deploy_worker_node.sh
index 89f5af104..717f3cc7e 100755
--- a/tools/deployment/provider_common/34_deploy_worker_node.sh
+++ b/tools/deployment/provider_common/34_deploy_worker_node.sh
@@ -19,21 +19,13 @@ set -xe
 # WORKERS_COUNT=3 ./tools/deployment/provider_common/34_deploy_worker_node.sh
 
 export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
-export TARGET_KUBECONFIG=${TARGET_KUBECONFIG:-"/tmp/target-cluster.kubeconfig"}
 export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
-export SITE=${SITE:-"docker-test-site"}
-export WORKERS_COUNT=${WORKERS_COUNT:-"1"}
-export AIRSHIP_SRC=${AIRSHIP_SRC:-"/tmp/airship"}
-
-# Adjust wokers replicas, default - 1
-sed -i "/value.*/s//value\": $WORKERS_COUNT }/g" \
-${AIRSHIP_SRC}/airshipctl/manifests/site/${SITE}/target/workers/machine_count.json
 
 echo "Stop/Delete ephemeral node"
 kind delete cluster --name "ephemeral-cluster"
 
 echo "Deploy worker node"
-airshipctl phase run  workers-target --debug --kubeconfig "$TARGET_KUBECONFIG"
+airshipctl phase run  workers-target --debug
 
 #Wait till node is created
-kubectl wait --for=condition=ready node --all --timeout=1000s --context $KUBECONFIG_TARGET_CONTEXT --kubeconfig $TARGET_KUBECONFIG -A
+kubectl wait --for=condition=ready node --all --timeout=1000s --context $KUBECONFIG_TARGET_CONTEXT --kubeconfig $KUBECONFIG -A
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index a36b8efb1..5c7e1e8f5 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -187,12 +187,10 @@
         - AIRSHIP_CONFIG_METADATA_PATH=manifests/site/docker-test-site/metadata.yaml SITE=docker-test-site EXTERNAL_KUBECONFIG="true" ./tools/deployment/22_test_configs.sh
         - ./tools/deployment/23_pull_documents.sh
         - PROVIDER=default SITE=docker-test-site ./tools/deployment/26_deploy_capi_ephemeral_node.sh
-        - CONTROLPLANE_COUNT=1 SITE=docker-test-site ./tools/deployment/provider_common/30_deploy_controlplane.sh
-        - KUBECONFIG=/tmp/target-cluster.kubeconfig ./tools/deployment/provider_common/32_cluster_init_target_node.sh
+        - ./tools/deployment/provider_common/30_deploy_controlplane.sh
+        - ./tools/deployment/provider_common/32_cluster_init_target_node.sh
         - ./tools/deployment/provider_common/33_cluster_move_target_node.sh
-        - WORKERS_COUNT=2 KUBECONFIG=/tmp/target-cluster.kubeconfig SITE=docker-test-site ./tools/deployment/provider_common/34_deploy_worker_node.sh
-        - KUBECONFIG=/tmp/target-cluster.kubeconfig ./tools/deployment/provider_common/41_check_certificate_expiration.sh
-        - KUBECONFIG=/tmp/target-cluster.kubeconfig ./tools/deployment/provider_common/42_rotate_sa_token.sh
+        - ./tools/deployment/provider_common/34_deploy_worker_node.sh
     voting: false
 - job:
     name: airship-airshipctl-docker-kubebench-conformance