diff --git a/go.mod b/go.mod
index c239cf044..c70185b30 100644
--- a/go.mod
+++ b/go.mod
@@ -20,6 +20,7 @@ require (
 	github.com/pkg/errors v0.9.1
 	github.com/spf13/cobra v1.0.0
 	github.com/stretchr/testify v1.4.0
+	golang.org/x/tools v0.0.0-20200619210111-0f592d2728bb // indirect
 	k8s.io/api v0.17.4
 	k8s.io/apiextensions-apiserver v0.17.4
 	k8s.io/apimachinery v0.17.4
diff --git a/go.sum b/go.sum
index ffe85f148..cd556d7aa 100644
--- a/go.sum
+++ b/go.sum
@@ -1300,6 +1300,8 @@ golang.org/x/tools v0.0.0-20200327195553-82bb89366a1e/go.mod h1:Sl4aGygMT6LrqrWc
 golang.org/x/tools v0.0.0-20200331202046-9d5940d49312/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200428211428-0c9eba77bc32 h1:Xvf3ZQTm5bjXPxhI7g+dwqsCqadK1rcNtwtszuatetk=
 golang.org/x/tools v0.0.0-20200428211428-0c9eba77bc32/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619210111-0f592d2728bb h1:/7SQoPdMxZ0c/Zu9tBJgMbRE/BmK6i9QXflNJXKAmw0=
+golang.org/x/tools v0.0.0-20200619210111-0f592d2728bb/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
diff --git a/pkg/cluster/status.go b/pkg/cluster/status.go
index f31ea7428..c8656582c 100644
--- a/pkg/cluster/status.go
+++ b/pkg/cluster/status.go
@@ -15,32 +15,31 @@
 package cluster
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 
 	apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
+	"sigs.k8s.io/cli-utils/pkg/kstatus/status"
+	"sigs.k8s.io/cli-utils/pkg/object"
 
 	"opendev.org/airship/airshipctl/pkg/document"
 	"opendev.org/airship/airshipctl/pkg/k8s/client"
 )
 
-// A Status represents a kubernetes resource's state.
-type Status string
-
-// These represent the default statuses
-const (
-	UnknownStatus = Status("Unknown")
-)
-
 // StatusMap holds a mapping of schema.GroupVersionResource to various statuses
 // a resource may be in, as well as the Expression used to check for that
 // status.
 type StatusMap struct {
 	client     client.Interface
-	mapping    map[schema.GroupVersionResource]map[Status]Expression
+	GkMapping  []schema.GroupKind
+	mapping    map[schema.GroupVersionResource]map[status.Status]Expression
 	restMapper *meta.DefaultRESTMapper
 }
 
@@ -52,10 +51,10 @@ type StatusMap struct {
 func NewStatusMap(client client.Interface) (*StatusMap, error) {
 	statusMap := &StatusMap{
 		client:     client,
-		mapping:    make(map[schema.GroupVersionResource]map[Status]Expression),
+		mapping:    make(map[schema.GroupVersionResource]map[status.Status]Expression),
 		restMapper: meta.NewDefaultRESTMapper([]schema.GroupVersion{}),
 	}
-
+
 	crds, err := statusMap.client.ApiextensionsClientSet().
 		ApiextensionsV1().
 		CustomResourceDefinitions().
@@ -73,9 +72,84 @@ func NewStatusMap(client client.Interface) (*StatusMap, error) {
 	return statusMap, nil
 }
 
+// ReadStatus returns object status
+func (sm *StatusMap) ReadStatus(ctx context.Context, resource object.ObjMetadata) *event.ResourceStatus {
+	gk := resource.GroupKind
+	restMapping, err := sm.restMapper.RESTMapping(gk, "v1")
+	if err != nil {
+		return handleResourceStatusError(resource, err)
+	}
+	options := metav1.GetOptions{}
+	obj, err := sm.client.DynamicClient().Resource(restMapping.Resource).
+		Namespace(resource.Namespace).Get(resource.Name, options)
+	if err != nil {
+		return handleResourceStatusError(resource, err)
+	}
+	return sm.ReadStatusForObject(ctx, obj)
+}
+
+// ReadStatusForObject returns resource status for object.
+// Current status will be returned only if expression matched.
+func (sm *StatusMap) ReadStatusForObject(
+	ctx context.Context, resource *unstructured.Unstructured) *event.ResourceStatus {
+	identifier := object.ObjMetadata{
+		GroupKind: resource.GroupVersionKind().GroupKind(),
+		Name:      resource.GetName(),
+		Namespace: resource.GetNamespace(),
+	}
+	gvk := resource.GroupVersionKind()
+	restMapping, err := sm.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	if err != nil {
+		return &event.ResourceStatus{
+			Identifier: identifier,
+			Status:     status.UnknownStatus,
+			Error:      err,
+		}
+	}
+
+	gvr := restMapping.Resource
+
+	obj, err := sm.client.DynamicClient().Resource(gvr).Namespace(resource.GetNamespace()).
+		Get(resource.GetName(), metav1.GetOptions{})
+	if err != nil {
+		return &event.ResourceStatus{
+			Identifier: identifier,
+			Status:     status.UnknownStatus,
+			Error:      err,
+		}
+	}
+
+	// No need to check for existence - if there isn't a mapping for this
+	// resource, the following for loop won't run anyway
+	for currentstatus, expression := range sm.mapping[gvr] {
+		var matched bool
+		matched, err = expression.Match(obj)
+		if err != nil {
+			return &event.ResourceStatus{
+				Identifier: identifier,
+				Status:     status.UnknownStatus,
+				Error:      err,
+			}
+		}
+		if matched {
+			return &event.ResourceStatus{
+				Identifier: identifier,
+				Status:     currentstatus,
+				Resource:   resource,
+				Message:    fmt.Sprintf("%s is %s", resource.GroupVersionKind().Kind, currentstatus.String()),
+			}
+		}
+	}
+	return &event.ResourceStatus{
+		Identifier: identifier,
+		Status:     status.UnknownStatus,
+		Error:      nil,
+	}
+}
+
 // GetStatusForResource iterates over all of the stored conditions for the
 // resource and returns the first status whose conditions are met.
-func (sm *StatusMap) GetStatusForResource(resource document.Document) (Status, error) {
+func (sm *StatusMap) GetStatusForResource(resource document.Document) (status.Status, error) {
 	gvk := getGVK(resource)
 
 	restMapping, err := sm.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
@@ -103,7 +177,7 @@ func (sm *StatusMap) GetStatusForResource(resource document.Document) (Status, e
 		}
 	}
 
-	return UnknownStatus, nil
+	return status.UnknownStatus, nil
 }
 
 // addCRD adds the mappings from the CRD to its associated statuses
@@ -122,6 +196,7 @@ func (sm *StatusMap) addCRD(crd apiextensions.CustomResourceDefinition) error {
 
 	gvrs := getGVRs(crd)
 	for _, gvr := range gvrs {
+		sm.GkMapping = append(sm.GkMapping, crd.GroupVersionKind().GroupKind())
 		gvk := gvr.GroupVersion().WithKind(crd.Spec.Names.Kind)
 		gvrSingular := gvr.GroupVersion().WithResource(crd.Spec.Names.Singular)
 		sm.mapping[gvr] = statusChecks
@@ -159,7 +234,7 @@ func getGVK(doc document.Document) schema.GroupVersionKind {
 // parseStatusChecks takes a string containing a map of status names (e.g.
 // Healthy) to the JSONPath filters associated with the statuses, and returns
 // the Go object equivalent.
-func parseStatusChecks(raw string) (map[Status]Expression, error) {
+func parseStatusChecks(raw string) (map[status.Status]Expression, error) {
 	type statusCheckType struct {
 		Status    string `json:"status"`
 		Condition string `json:"condition"`
@@ -172,7 +247,7 @@ func parseStatusChecks(raw string) (map[Status]Expression, error) {
 		}
 	}
 
-	expressionMap := make(map[Status]Expression)
+	expressionMap := make(map[status.Status]Expression)
 	for _, mapping := range mappings {
 		if mapping.Status == "" {
 			return nil, ErrInvalidStatusCheck{What: "missing status field"}
@@ -182,8 +257,25 @@ func parseStatusChecks(raw string) (map[Status]Expression, error) {
 			return nil, ErrInvalidStatusCheck{What: "missing condition field"}
 		}
 
-		expressionMap[Status(mapping.Status)] = Expression{Condition: mapping.Condition}
+		expressionMap[status.Status(mapping.Status)] = Expression{Condition: mapping.Condition}
 	}
 
 	return expressionMap, nil
 }
+
+// handleResourceStatusError construct the appropriate ResourceStatus
+// object based on the type of error.
+func handleResourceStatusError(identifier object.ObjMetadata, err error) *event.ResourceStatus {
+	if errors.IsNotFound(err) {
+		return &event.ResourceStatus{
+			Identifier: identifier,
+			Status:     status.NotFoundStatus,
+			Message:    "Resource not found",
+		}
+	}
+	return &event.ResourceStatus{
+		Identifier: identifier,
+		Status:     status.UnknownStatus,
+		Error:      err,
+	}
+}
diff --git a/pkg/cluster/status_test.go b/pkg/cluster/status_test.go
index 6289f2cff..d381018f9 100644
--- a/pkg/cluster/status_test.go
+++ b/pkg/cluster/status_test.go
@@ -15,14 +15,17 @@
 package cluster_test
 
 import (
+	"context"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/cli-utils/pkg/kstatus/status"
+	"sigs.k8s.io/cli-utils/pkg/object"
 
 	"opendev.org/airship/airshipctl/pkg/cluster"
 	"opendev.org/airship/airshipctl/pkg/document"
@@ -76,7 +79,7 @@ func TestGetStatusForResource(t *testing.T) {
 		name           string
 		selector       document.Selector
 		client         *fake.Client
-		expectedStatus cluster.Status
+		expectedStatus status.Status
 		err            error
 	}{
 		{
@@ -86,9 +89,9 @@ func TestGetStatusForResource(t *testing.T) {
 				ByName("stable-resource"),
 			client: fake.NewClient(
 				fake.WithCRDs(makeResourceCRD(annotationValidStatusCheck())),
-				fake.WithDynamicObjects(makeResource("Resource", "stable-resource", "stable")),
+				fake.WithDynamicObjects(makeResource("stable-resource", "stable")),
 			),
-			expectedStatus: cluster.Status("Stable"),
+			expectedStatus: status.Status("Stable"),
 		},
 		{
 			name: "pending-resource-is-pending",
@@ -97,9 +100,9 @@ func TestGetStatusForResource(t *testing.T) {
 				ByName("pending-resource"),
 			client: fake.NewClient(
 				fake.WithCRDs(makeResourceCRD(annotationValidStatusCheck())),
-				fake.WithDynamicObjects(makeResource("Resource", "pending-resource", "pending")),
+				fake.WithDynamicObjects(makeResource("pending-resource", "pending")),
 			),
-			expectedStatus: cluster.Status("Pending"),
+			expectedStatus: status.Status("Pending"),
 		},
 		{
 			name: "unknown-resource-is-unknown",
@@ -108,9 +111,9 @@ func TestGetStatusForResource(t *testing.T) {
 				ByName("unknown"),
 			client: fake.NewClient(
 				fake.WithCRDs(makeResourceCRD(annotationValidStatusCheck())),
-				fake.WithDynamicObjects(makeResource("Resource", "unknown", "unknown")),
+				fake.WithDynamicObjects(makeResource("unknown", "unknown")),
 			),
-			expectedStatus: cluster.UnknownStatus,
+			expectedStatus: status.UnknownStatus,
 		},
 		{
 			name: "missing-resource-returns-error",
@@ -146,11 +149,23 @@ func TestGetStatusForResource(t *testing.T) {
 	}
 }
 
-func makeResource(kind, name, state string) *unstructured.Unstructured {
+func TestReadStatus(t *testing.T) {
+	c := fake.NewClient(fake.WithCRDs(makeResourceCRD(annotationValidStatusCheck())),
+		fake.WithDynamicObjects(makeResource("pending-resource", "pending")))
+	statusMap, err := cluster.NewStatusMap(c)
+	require.NoError(t, err)
+	ctx := context.Background()
+	resource := object.ObjMetadata{Namespace: "default",
+		Name: "pending-resource", GroupKind: schema.GroupKind{Group: "example.com", Kind: "Resource"}}
+	result := statusMap.ReadStatus(ctx, resource)
+	assert.Equal(t, "Pending", result.Status.String())
+}
+
+func makeResource(name, state string) *unstructured.Unstructured {
 	return &unstructured.Unstructured{
 		Object: map[string]interface{}{
 			"apiVersion": "example.com/v1",
-			"kind":       kind,
+			"kind":       "Resource",
 			"metadata": map[string]interface{}{
 				"name":      name,
 				"namespace": "default",
diff --git a/pkg/k8s/poller/poller.go b/pkg/k8s/poller/poller.go
new file mode 100644
index 000000000..a996f9f37
--- /dev/null
+++ b/pkg/k8s/poller/poller.go
@@ -0,0 +1,99 @@
+/*
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package poller
+
+import (
+	"context"
+
+	appsv1 "k8s.io/api/apps/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/cli-utils/pkg/kstatus/polling"
+	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/clusterreader"
+	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/engine"
+	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/event"
+	"sigs.k8s.io/cli-utils/pkg/kstatus/polling/statusreaders"
+	"sigs.k8s.io/cli-utils/pkg/object"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"opendev.org/airship/airshipctl/pkg/cluster"
+)
+
+// NewStatusPoller creates a new StatusPoller using the given clusterreader and mapper. The StatusPoller
+// will use the client for all calls to the cluster.
+func NewStatusPoller(reader client.Reader, mapper meta.RESTMapper, statusmap *cluster.StatusMap) *StatusPoller {
+	return &StatusPoller{
+		engine: &engine.PollerEngine{
+			Reader: reader,
+			Mapper: mapper,
+		},
+		statusmap: statusmap,
+	}
+}
+
+// StatusPoller provides functionality for polling a cluster for status for a set of resources.
+type StatusPoller struct {
+	engine    *engine.PollerEngine
+	statusmap *cluster.StatusMap
+}
+
+// Poll will create a new statusPollerRunner that will poll all the resources provided and report their status
+// back on the event channel returned. The statusPollerRunner can be canceled at any time by canceling the
+// context passed in.
+func (s *StatusPoller) Poll(
+	ctx context.Context, identifiers []object.ObjMetadata, options polling.Options) <-chan event.Event {
+	return s.engine.Poll(ctx, identifiers, engine.Options{
+		PollInterval:             options.PollInterval,
+		ClusterReaderFactoryFunc: clusterReaderFactoryFunc(options.UseCache),
+		StatusReadersFactoryFunc: s.createStatusReaders,
+	})
+}
+
+// createStatusReaders creates an instance of all the statusreaders. This includes a set of statusreaders for
+// a particular GroupKind, and a default engine used for all resource types that does not have
+// a specific statusreaders.
+// TODO: We should consider making the registration more automatic instead of having to create each of them
+// here. Also, it might be worth creating them on demand.
+func (s *StatusPoller) createStatusReaders(reader engine.ClusterReader, mapper meta.RESTMapper) (
+	map[schema.GroupKind]engine.StatusReader, engine.StatusReader) {
+	defaultStatusReader := statusreaders.NewGenericStatusReader(reader, mapper)
+	replicaSetStatusReader := statusreaders.NewReplicaSetStatusReader(reader, mapper, defaultStatusReader)
+	deploymentStatusReader := statusreaders.NewDeploymentResourceReader(reader, mapper, replicaSetStatusReader)
+	statefulSetStatusReader := statusreaders.NewStatefulSetResourceReader(reader, mapper, defaultStatusReader)
+
+	statusReaders := map[schema.GroupKind]engine.StatusReader{
+		appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind():  deploymentStatusReader,
+		appsv1.SchemeGroupVersion.WithKind("StatefulSet").GroupKind(): statefulSetStatusReader,
+		appsv1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind():  replicaSetStatusReader,
+	}
+	for _, gk := range s.statusmap.GkMapping {
+		statusReaders[gk] = s.statusmap
+	}
+	return statusReaders, defaultStatusReader
+}
+
+// clusterReaderFactoryFunc returns a factory function for creating an instance of a ClusterReader.
+// This function is used by the StatusPoller to create a ClusterReader for each StatusPollerRunner.
+// The decision for which implementation of the ClusterReader interface that should be used are
+// decided here rather than based on information passed in to the factory function. Thus, the decision
+// for which implementation is decided when the StatusPoller is created.
+func clusterReaderFactoryFunc(useCache bool) engine.ClusterReaderFactoryFunc {
+	return func(r client.Reader, mapper meta.RESTMapper, identifiers []object.ObjMetadata) (engine.ClusterReader, error) {
+		if useCache {
+			return clusterreader.NewCachingClusterReader(r, mapper, identifiers)
+		}
+		return &clusterreader.DirectClusterReader{Reader: r}, nil
+	}
+}
diff --git a/pkg/k8s/poller/poller_test.go b/pkg/k8s/poller/poller_test.go
new file mode 100755
index 000000000..7a4929a4c
--- /dev/null
+++ b/pkg/k8s/poller/poller_test.go
@@ -0,0 +1,52 @@
+/*
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package poller_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"opendev.org/airship/airshipctl/pkg/cluster"
+	"opendev.org/airship/airshipctl/pkg/config"
+	"opendev.org/airship/airshipctl/pkg/environment"
+	"opendev.org/airship/airshipctl/pkg/k8s/client/fake"
+	"opendev.org/airship/airshipctl/pkg/k8s/poller"
+	k8sutils "opendev.org/airship/airshipctl/pkg/k8s/utils"
+)
+
+func TestNewStatusPoller(t *testing.T) {
+	settings := &environment.AirshipCTLSettings{
+		Debug:          true,
+		Config:         config.NewConfig(),
+		KubeConfigPath: "testdata/kubeconfig.yaml",
+	}
+	airClient := fake.NewClient()
+
+	f := k8sutils.FactoryFromKubeConfigPath(settings.KubeConfigPath)
+	restConfig, err := f.ToRESTConfig()
+	require.NoError(t, err)
+	restMapper, err := f.ToRESTMapper()
+	require.NoError(t, err)
+	restClient, err := client.New(restConfig, client.Options{Mapper: restMapper})
+	require.NoError(t, err)
+	statusmap, err := cluster.NewStatusMap(airClient)
+	require.NoError(t, err)
+
+	a := poller.NewStatusPoller(restClient, restMapper, statusmap)
+	assert.NotNil(t, a)
+}
diff --git a/pkg/k8s/poller/testdata/kubeconfig.yaml b/pkg/k8s/poller/testdata/kubeconfig.yaml
new file mode 100755
index 000000000..967864a76
--- /dev/null
+++ b/pkg/k8s/poller/testdata/kubeconfig.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1Ea3lPVEUzTURNd09Wb1hEVEk1TURreU5qRTNNRE13T1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTUZyCkdxM0kyb2dZci81Y01Udy9Na1pORTNWQURzdEdyU240WjU2TDhPUGhMcUhDN2t1dno2dVpES3dCSGtGeTBNK2MKRXIzd2piUGE1aTV5NmkyMGtxSHBVMjdPZTA0dzBXV2s4N0RSZVlWaGNoZVJHRXoraWt3SndIcGRmMjJVemZNKwpkSDBzaUhuMVd6UnovYk4za3hMUzJlMnZ2U1Y3bmNubk1YRUd4OXV0MUY0NThHeWxxdmxXTUlWMzg5Q2didXFDCkcwcFdiMTBLM0RVZWdiT25Xa1FmSm5sTWRRVVZDUVdZZEZaaklrcWtkWi9hVTRobkNEV01oZXNWRnFNaDN3VVAKczhQay9BNWh1ZFFPbnFRNDVIWXZLdjZ5RjJWcDUyWExBRUx3NDJ4aVRKZlh0V1h4eHR6cU4wY1lyL2VxeS9XMQp1YVVGSW5xQjFVM0JFL1oxbmFrQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFKUUVKQVBLSkFjVDVuK3dsWGJsdU9mS0J3c2gKZTI4R1c5R2QwM0N0NGF3RzhzMXE1ZHNua2tpZmVTUENHVFZ1SXF6UTZDNmJaSk9SMDMvVEl5ejh6NDJnaitDVApjWUZXZkltM2RKTnpRL08xWkdySXZZNWdtcWJtWDlpV0JaU24rRytEOGxubzd2aGMvY0tBRFR5OTMvVU92MThuCkdhMnIrRGJJcHcyTWVBVEl2elpxRS9RWlVSQ25DMmdjUFhTVzFqN2h4R3o1a3ZNcGVDZTdQYVUvdVFvblVHSWsKZ2t6ZzI4NHQvREhUUzc4N1V1SUg5cXBaV09yTFNMOGFBeUxQUHhWSXBteGZmbWRETE9TS2VUemRlTmxoSitUMwowQlBVaHBQTlJBNTNJN0hRQjhVUDR2elNONTkzZ1VFbVlFQ2Jic2RYSzB6ZVR6SDdWWHR2Zmd5WTVWWT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+    server: https://10.0.1.7:6443
+  name: kubernetes_target
+contexts:
+- context:
+    cluster: kubernetes_target
+    user: kubernetes-admin
+  name: kubernetes-admin@kubernetes
+current-context: ""
+kind: Config
+preferences: {}
+users:
+- name: kubernetes-admin
+  user:
+    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJQXhEdzk2RUY4SXN3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB4T1RBNU1qa3hOekF6TURsYUZ3MHlNREE1TWpneE56QXpNVEphTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXV6R0pZdlBaNkRvaTQyMUQKSzhXSmFaQ25OQWQycXo1cC8wNDJvRnpRUGJyQWd6RTJxWVZrek9MOHhBVmVSN1NONXdXb1RXRXlGOEVWN3JyLwo0K0hoSEdpcTVQbXF1SUZ5enpuNi9JWmM4alU5eEVmenZpa2NpckxmVTR2UlhKUXdWd2dBU05sMkFXQUloMmRECmRUcmpCQ2ZpS1dNSHlqMFJiSGFsc0J6T3BnVC9IVHYzR1F6blVRekZLdjJkajVWMU5rUy9ESGp5UlJKK0VMNlEKQlltR3NlZzVQNE5iQzllYnVpcG1NVEFxL0p1bU9vb2QrRmpMMm5acUw2Zkk2ZkJ0RjVPR2xwQ0IxWUo4ZnpDdApHUVFaN0hUSWJkYjJ0cDQzRlZPaHlRYlZjSHFUQTA0UEoxNSswV0F5bVVKVXo4WEE1NDRyL2J2NzRKY0pVUkZoCmFyWmlRd0lEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFMMmhIUmVibEl2VHJTMFNmUVg1RG9ueVVhNy84aTg1endVWApSd3dqdzFuS0U0NDJKbWZWRGZ5b0hRYUM4Ti9MQkxyUXM0U0lqU1JYdmFHU1dSQnRnT1RRV21Db1laMXdSbjdwCndDTXZQTERJdHNWWm90SEZpUFl2b1lHWFFUSXA3YlROMmg1OEJaaEZ3d25nWUovT04zeG1rd29IN1IxYmVxWEYKWHF1TTluekhESk41VlZub1lQR09yRHMwWlg1RnNxNGtWVU0wVExNQm9qN1ZIRDhmU0E5RjRYNU4yMldsZnNPMAo4aksrRFJDWTAyaHBrYTZQQ0pQS0lNOEJaMUFSMG9ZakZxT0plcXpPTjBqcnpYWHh4S2pHVFVUb1BldVA5dCtCCjJOMVA1TnI4a2oxM0lrend5Q1NZclFVN09ZM3ltZmJobHkrcXZxaFVFa014MlQ1SkpmQT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBdXpHSll2UFo2RG9pNDIxREs4V0phWkNuTkFkMnF6NXAvMDQyb0Z6UVBickFnekUyCnFZVmt6T0w4eEFWZVI3U041d1dvVFdFeUY4RVY3cnIvNCtIaEhHaXE1UG1xdUlGeXp6bjYvSVpjOGpVOXhFZnoKdmlrY2lyTGZVNHZSWEpRd1Z3Z0FTTmwyQVdBSWgyZERkVHJqQkNmaUtXTUh5ajBSYkhhbHNCek9wZ1QvSFR2MwpHUXpuVVF6Rkt2MmRqNVYxTmtTL0RIanlSUkorRUw2UUJZbUdzZWc1UDROYkM5ZWJ1aXBtTVRBcS9KdW1Pb29kCitGakwyblpxTDZmSTZmQnRGNU9HbHBDQjFZSjhmekN0R1FRWjdIVEliZGIydHA0M0ZWT2h5UWJWY0hxVEEwNFAKSjE1KzBXQXltVUpVejhYQTU0NHIvYnY3NEpjSlVSRmhhclppUXdJREFRQUJBb0lCQVFDU0pycjlaeVpiQ2dqegpSL3VKMFZEWCt2aVF4c01BTUZyUjJsOE1GV3NBeHk1SFA4Vk4xYmc5djN0YUVGYnI1U3hsa3lVMFJRNjNQU25DCm1uM3ZqZ3dVQWlScllnTEl5MGk0UXF5VFBOU1V4cnpTNHRxTFBjM3EvSDBnM2FrNGZ2cSsrS0JBUUlqQnloamUKbnVFc1JpMjRzT3NESlM2UDE5NGlzUC9yNEpIM1M5bFZGbkVuOGxUR2c0M1kvMFZoMXl0cnkvdDljWjR5ZUNpNwpjMHFEaTZZcXJZaFZhSW9RRW1VQjdsbHRFZkZzb3l4VDR6RTE5U3pVbkRoMmxjYTF1TzhqcmI4d2xHTzBoQ2JyClB1R1l2WFFQa3Q0VlNmalhvdGJ3d2lBNFRCVERCRzU1bHp6MmNKeS9zSS8zSHlYbEMxcTdXUmRuQVhhZ1F0VzkKOE9DZGRkb0JBb0dCQU5NcUNtSW94REtyckhZZFRxT1M1ZFN4cVMxL0NUN3ZYZ0pScXBqd2Y4WHA2WHo0KzIvTAozVXFaVDBEL3dGTkZkc1Z4eFYxMnNYMUdwMHFWZVlKRld5OVlCaHVSWGpTZ0ZEWldSY1Z1Y01sNVpPTmJsbmZGCjVKQ0xnNXFMZ1g5VTNSRnJrR3A0R241UDQxamg4TnhKVlhzZG5xWE9xNTFUK1RRT1UzdkpGQjc1QW9HQkFPTHcKalp1cnZtVkZyTHdaVGgvRDNpWll5SVV0ZUljZ2NKLzlzbTh6L0pPRmRIbFd4dGRHUFVzYVd1MnBTNEhvckFtbgpqTm4vSTluUXd3enZ3MWUzVVFPbUhMRjVBczk4VU5hbk5TQ0xNMW1yaXZHRXJ1VHFnTDM1bU41eFZPdTUxQU5JCm4yNkFtODBJT2JDeEtLa0R0ZXJSaFhHd3g5c1pONVJCbG9VRThZNGJBb0dBQ3ZsdVhMZWRxcng5VkE0bDNoNXUKVDJXRVUxYjgxZ1orcmtRc1I1S0lNWEw4cllBTElUNUpHKzFuendyN3BkaEFXZmFWdVV2SDRhamdYT0h6MUs5aQpFODNSVTNGMG9ldUg0V01PY1RwU0prWm0xZUlXcWRiaEVCb1FGdUlWTXRib1BsV0d4ZUhFRHJoOEtreGp4aThSCmdEcUQyajRwY1IzQ0g5QjJ5a0lqQjVFQ2dZRUExc0xXLys2enE1c1lNSm14K1JXZThhTXJmL3pjQnVTSU1LQWgKY0dNK0wwMG9RSHdDaUU4TVNqcVN1ajV3R214YUFuanhMb3ZwSFlRV1VmUEVaUW95UE1YQ2VhRVBLOU4xbk8xMwp0V2lHRytIZkIxaU5PazFCc0lhNFNDbndOM1FRVTFzeXBaeEgxT3hueS9LYmkvYmEvWEZ5VzNqMGFUK2YvVWxrCmJGV1ZVdWtDZ1lFQTBaMmRTTFlmTj
V5eFNtYk5xMWVqZXdWd1BjRzQxR2hQclNUZEJxdHFac1doWGE3aDdLTWEKeHdvamh5SXpnTXNyK2tXODdlajhDQ2h0d21sQ1p5QU92QmdOZytncnJ1cEZLM3FOSkpKeU9YREdHckdpbzZmTQp5aXB3Q2tZVGVxRThpZ1J6UkI5QkdFUGY4eVpjMUtwdmZhUDVhM0lRZmxiV0czbGpUemNNZVZjPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
diff --git a/pkg/phase/apply/apply_test.go b/pkg/phase/apply/apply_test.go
index 3cd43ca2f..3d0ebaa5a 100644
--- a/pkg/phase/apply/apply_test.go
+++ b/pkg/phase/apply/apply_test.go
@@ -117,7 +117,6 @@ func TestDeploy(t *testing.T) {
 // makeNewFakeRootSettings takes kubeconfig path and directory path to fixture dir as argument.
 func makeNewFakeRootSettings(t *testing.T, kp string, dir string) *environment.AirshipCTLSettings {
 	t.Helper()
-
 	akp, err := filepath.Abs(kp)
 	require.NoError(t, err)