diff --git a/manifests/function/phase-helpers/get_node/kubectl_get_node.sh b/manifests/function/phase-helpers/get_node/kubectl_get_node.sh
new file mode 100644
index 000000000..1eac065cf
--- /dev/null
+++ b/manifests/function/phase-helpers/get_node/kubectl_get_node.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -xe
+
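+# KUBECONFIG and KCTL_CONTEXT are assumed to be provided in the container
+# environment by the phase executor. Output is sent to stderr (1>&2) so that
+# stdout is left free for the KRM function wrapper.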
+kubectl --kubeconfig "$KUBECONFIG" --context "$KCTL_CONTEXT" --request-timeout 10s get node 1>&2
diff --git a/manifests/function/phase-helpers/get_node/kustomization.yaml b/manifests/function/phase-helpers/get_node/kustomization.yaml
new file mode 100644
index 000000000..c5ce0a797
--- /dev/null
+++ b/manifests/function/phase-helpers/get_node/kustomization.yaml
@@ -0,0 +1,6 @@
+configMapGenerator:
+- name: kubectl-get-node
+  options:
+    disableNameSuffixHash: true
+  files:
+  - script=kubectl_get_node.sh
diff --git a/manifests/function/phase-helpers/get_pods/kubectl_get_pods.sh b/manifests/function/phase-helpers/get_pods/kubectl_get_pods.sh
index d5defa041..1ae64369a 100644
--- a/manifests/function/phase-helpers/get_pods/kubectl_get_pods.sh
+++ b/manifests/function/phase-helpers/get_pods/kubectl_get_pods.sh
@@ -12,4 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-kubectl --kubeconfig $KUBECONFIG --context $KCTL_CONTEXT get pods --all-namespaces 1>&2
+set -xe
+
+kubectl --kubeconfig "$KUBECONFIG" --context "$KCTL_CONTEXT" --request-timeout 10s get pods --all-namespaces 1>&2
diff --git a/manifests/function/phase-helpers/kustomization.yaml b/manifests/function/phase-helpers/kustomization.yaml
index 94fada475..2f7539d3b 100644
--- a/manifests/function/phase-helpers/kustomization.yaml
+++ b/manifests/function/phase-helpers/kustomization.yaml
@@ -3,3 +3,5 @@ resources:
 - get_pods
 - wait_tigera
 - wait_deploy
+- get_node
+- wait_pods
diff --git a/manifests/function/phase-helpers/wait_node/kustomization.yaml b/manifests/function/phase-helpers/wait_node/kustomization.yaml
index 1427e2d9a..20606afde 100644
--- a/manifests/function/phase-helpers/wait_node/kustomization.yaml
+++ b/manifests/function/phase-helpers/wait_node/kustomization.yaml
@@ -1,5 +1,5 @@
 configMapGenerator:
-- name: kubectl-get-node
+- name: kubectl-wait-node
   options:
     disableNameSuffixHash: true
   files:
diff --git a/manifests/function/phase-helpers/wait_pods/kubectl_wait_pods.sh b/manifests/function/phase-helpers/wait_pods/kubectl_wait_pods.sh
new file mode 100644
index 000000000..07004826a
--- /dev/null
+++ b/manifests/function/phase-helpers/wait_pods/kubectl_wait_pods.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -xe
+
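+# Wait up to 600s for every pod in all namespaces to report the Ready
+# condition. KUBECONFIG and KCTL_CONTEXT are assumed to be provided in the
+# container environment by the phase executor.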
+kubectl --kubeconfig "$KUBECONFIG" --context "$KCTL_CONTEXT" wait --all-namespaces --for=condition=Ready pods --all --timeout=600s 1>&2
diff --git a/manifests/function/phase-helpers/wait_pods/kustomization.yaml b/manifests/function/phase-helpers/wait_pods/kustomization.yaml
new file mode 100644
index 000000000..e45eba659
--- /dev/null
+++ b/manifests/function/phase-helpers/wait_pods/kustomization.yaml
@@ -0,0 +1,6 @@
+configMapGenerator:
+- name: kubectl-wait-pods
+  options:
+    disableNameSuffixHash: true
+  files:
+  - script=kubectl_wait_pods.sh
diff --git a/manifests/phases/executors.yaml b/manifests/phases/executors.yaml
index ec0ada0b3..550918779 100644
--- a/manifests/phases/executors.yaml
+++ b/manifests/phases/executors.yaml
@@ -276,6 +276,21 @@ config: |
 ---
 apiVersion: airshipit.org/v1alpha1
 kind: GenericContainer
+metadata:
+  name: kubectl-wait-node
+  labels:
+    airshipit.org/deploy-k8s: "false"
+spec:
+  type: krm
+  image: quay.io/airshipit/toolbox:latest
+  hostNetwork: true
+configRef:
+  kind: ConfigMap
+  name: kubectl-wait-node
+  apiVersion: v1
+---
+apiVersion: airshipit.org/v1alpha1
+kind: GenericContainer
 metadata:
   name: kubectl-get-node
   labels:
@@ -333,3 +348,18 @@ configRef:
   kind: ConfigMap
   name: kubectl-wait-deploy
   apiVersion: v1
+---
+apiVersion: airshipit.org/v1alpha1
+kind: GenericContainer
+metadata:
+  name: kubectl-wait-pods
+  labels:
+    airshipit.org/deploy-k8s: "false"
+spec:
+  type: krm
+  image: quay.io/airshipit/toolbox:latest
+  hostNetwork: true
+configRef:
+  kind: ConfigMap
+  name: kubectl-wait-pods
+  apiVersion: v1
diff --git a/manifests/phases/phases.yaml b/manifests/phases/phases.yaml
index afe7b50e6..71c93bad1 100644
--- a/manifests/phases/phases.yaml
+++ b/manifests/phases/phases.yaml
@@ -284,6 +284,17 @@ kind: Phase
 metadata:
   name: kubectl-wait-node-ephemeral
   clusterName: ephemeral-cluster
+config:
+  executorRef:
+    apiVersion: airshipit.org/v1alpha1
+    kind: GenericContainer
+    name: kubectl-wait-node
+---
+apiVersion: airshipit.org/v1alpha1
+kind: Phase
+metadata:
+  name: kubectl-get-node-target
+  clusterName: target-cluster
 config:
   executorRef:
     apiVersion: airshipit.org/v1alpha1
@@ -303,6 +314,17 @@ config:
 ---
 apiVersion: airshipit.org/v1alpha1
 kind: Phase
+metadata:
+  name: kubectl-get-pods-target
+  clusterName: target-cluster
+config:
+  executorRef:
+    apiVersion: airshipit.org/v1alpha1
+    kind: GenericContainer
+    name: kubectl-get-pods
+---
+apiVersion: airshipit.org/v1alpha1
+kind: Phase
 metadata:
   name: kubectl-wait-tigera-ephemeral
   clusterName: ephemeral-cluster
@@ -314,6 +336,17 @@ config:
 ---
 apiVersion: airshipit.org/v1alpha1
 kind: Phase
+metadata:
+  name: kubectl-wait-tigera-target
+  clusterName: target-cluster
+config:
+  executorRef:
+    apiVersion: airshipit.org/v1alpha1
+    kind: GenericContainer
+    name: kubectl-wait-tigera
+---
+apiVersion: airshipit.org/v1alpha1
+kind: Phase
 metadata:
   name: kubectl-wait-deploy-ephemeral
   clusterName: ephemeral-cluster
@@ -322,3 +355,14 @@ config:
     apiVersion: airshipit.org/v1alpha1
     kind: GenericContainer
     name: kubectl-wait-deploy
+---
+apiVersion: airshipit.org/v1alpha1
+kind: Phase
+metadata:
+  name: kubectl-wait-pods-target
+  clusterName: target-cluster
+config:
+  executorRef:
+    apiVersion: airshipit.org/v1alpha1
+    kind: GenericContainer
+    name: kubectl-wait-pods
diff --git a/tools/deployment/30_deploy_controlplane.sh b/tools/deployment/30_deploy_controlplane.sh
index e3d75ee36..47bc80fa5 100755
--- a/tools/deployment/30_deploy_controlplane.sh
+++ b/tools/deployment/30_deploy_controlplane.sh
@@ -15,8 +15,6 @@
 set -ex
 
 EPHEMERAL_DOMAIN_NAME="air-ephemeral"
-export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
-export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
 
 # TODO (dukov) this is needed due to sushy tools inserts cdrom image to
 # all vms. This can be removed once sushy tool is fixed
@@ -35,17 +33,13 @@ echo "Create target k8s cluster resources"
 airshipctl phase run controlplane-ephemeral --debug
 
 echo "List all nodes in target cluster"
-kubectl \
-  --kubeconfig $KUBECONFIG \
-  --context $KUBECONFIG_TARGET_CONTEXT \
-  --request-timeout 10s \
-  get node
-
+# The script for this phase is located in manifests/function/phase-helpers/get_node/
+# To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find the ConfigMap named kubectl-get-node
+airshipctl phase run kubectl-get-node-target --debug
 
 echo "List all pods in target cluster"
-kubectl \
-  --kubeconfig  $KUBECONFIG \
-  --context $KUBECONFIG_TARGET_CONTEXT \
-  --request-timeout 10s \
-  get pods \
-  --all-namespaces
+# The script for this phase is located in manifests/function/phase-helpers/get_pods/
+# To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find the ConfigMap named kubectl-get-pods
+airshipctl phase run kubectl-get-pods-target --debug
diff --git a/tools/deployment/31_deploy_initinfra_target_node.sh b/tools/deployment/31_deploy_initinfra_target_node.sh
index 09c7f715b..8ab693203 100755
--- a/tools/deployment/31_deploy_initinfra_target_node.sh
+++ b/tools/deployment/31_deploy_initinfra_target_node.sh
@@ -14,39 +14,20 @@
 
 set -xe
 
-export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
-export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
-
 echo "Deploy calico using tigera operator"
 airshipctl phase run initinfra-networking-target --debug
 
-echo "Wait for Calico to be deployed using tigera"
-kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=600s
-
-echo "Wait for Established condition of tigerastatus(CRD) to be true for tigerastatus(CR) to show up"
-kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --for=condition=Established crd/tigerastatuses.operator.tigera.io --timeout=300s
-
-# Wait till CR(tigerastatus) is available
-count=0
-max_retry_attempts=150
-until [[ $(kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get tigerastatus 2>/dev/null) ]]; do
-  count=$((count + 1))
-  if [[ ${count} -eq "${max_retry_attempts}" ]]; then
-    echo ' Timed out waiting for tigerastatus'
-    exit 1
-  fi
-  sleep 2
-done
-
-# Wait till condition is available for tigerastatus
-kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --for=condition=Available tigerastatus --all --timeout=1000s
+# Wait for Calico to be deployed using tigera
+# The script for this phase is located in manifests/function/phase-helpers/wait_tigera/
+# To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find the ConfigMap named kubectl-wait-tigera
+airshipctl phase run kubectl-wait-tigera-target --debug
 
 echo "Deploy infra to cluster"
 airshipctl phase run initinfra-target --debug
 
 echo "List all pods"
-kubectl \
-  --kubeconfig $KUBECONFIG \
-  --context $KUBECONFIG_TARGET_CONTEXT \
-   get pods \
-  --all-namespaces
+# The script for this phase is located in manifests/function/phase-helpers/get_pods/
+# To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find the ConfigMap named kubectl-get-pods
+airshipctl phase run kubectl-get-pods-target
diff --git a/tools/deployment/32_cluster_init_target_node.sh b/tools/deployment/32_cluster_init_target_node.sh
index fcd7f7101..3955ed324 100755
--- a/tools/deployment/32_cluster_init_target_node.sh
+++ b/tools/deployment/32_cluster_init_target_node.sh
@@ -14,12 +14,16 @@
 
 set -xe
 
-export KUBECONFIG=${KUBECONFIG:-"$HOME/.airship/kubeconfig"}
-export KUBECONFIG_TARGET_CONTEXT=${KUBECONFIG_TARGET_CONTEXT:-"target-cluster"}
-
 echo "Deploy CAPI components to target cluster"
 airshipctl phase run clusterctl-init-target --debug
 
 echo "Waiting for pods to be ready"
-kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT wait --all-namespaces --for=condition=Ready pods --all --timeout=600s
-kubectl --kubeconfig $KUBECONFIG --context $KUBECONFIG_TARGET_CONTEXT get pods --all-namespaces
+# The script for this phase is located in manifests/function/phase-helpers/wait_pods/
+# To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find the ConfigMap named kubectl-wait-pods
+airshipctl phase run kubectl-wait-pods-target --debug
+
+# The script for this phase is located in manifests/function/phase-helpers/get_pods/
+# To get the ConfigMap for this phase, execute `airshipctl phase render --source config -k ConfigMap`
+# and find the ConfigMap named kubectl-get-pods
+airshipctl phase run kubectl-get-pods-target --debug