Refactor scheduling labels

This removes the `sip.airshipit.org/scheduled` label, and SIP now
relies solely on the presence/absence of the `sip.airshipit.org/cluster`
(renamed from `sip.airshipit.org/workload-cluster`) label to determine
which SIP cluster, if any, a BMH is already scheduled to.

Closes #7

Signed-off-by: Sean Eagan <seaneagan1@gmail.com>
Change-Id: I10f0c3a0cf420e41916fe9c1144fab9df21f3b97
This commit is contained in:
Sean Eagan 2021-03-12 11:14:42 -06:00
parent 58faa0d491
commit 91def87243
9 changed files with 77 additions and 100 deletions

View File

@ -4,7 +4,6 @@ metadata:
name: rdm9r006o002
labels:
vino.airshipit.org/flavor: control-plane
sip.airshipit.org/scheduled: "false"
sip.airshipit.org/rack: r006
sip.airshipit.org/server: rdm9r006o002
spec:
@ -26,7 +25,6 @@ metadata:
name: rdm9r006o001
labels:
vino.airshipit.org/flavor: control-plane
sip.airshipit.org/scheduled: "false"
sip.airshipit.org/rack: r006
sip.airshipit.org/server: rdm9r006o001
spec:
@ -48,7 +46,6 @@ metadata:
name: rdm9r007o001
labels:
vino.airshipit.org/flavor: control-plane
sip.airshipit.org/scheduled: "false"
sip.airshipit.org/rack: r007
sip.airshipit.org/server: rdm9r007o001
spec:
@ -70,7 +67,6 @@ metadata:
name: rdm9r007o002
labels:
vino.airshipit.org/flavor: worker
sip.airshipit.org/scheduled: "false"
sip.airshipit.org/rack: r007
sip.airshipit.org/server: rdm9r007o002
spec:
@ -92,7 +88,6 @@ metadata:
name: rdm9r008o002
labels:
vino.airshipit.org/flavor: worker
sip.airshipit.org/scheduled: "false"
sip.airshipit.org/rack: r008
sip.airshipit.org/server: rdm9r008o002
spec:
@ -114,7 +109,6 @@ metadata:
name: rdm9r009o002
labels:
vino.airshipit.org/flavor: worker
sip.airshipit.org/scheduled: "false"
sip.airshipit.org/rack: r009
sip.airshipit.org/server: rdm9r009o002
spec:

View File

@ -1,6 +1,6 @@
kubectl label baremetalhosts -n metal3 --overwrite rdm9r006o001 sip.airshipit.org/rack=r006 sip.airshipit.org/server=rdm9r006o001 vino.airshipit.org/flavor=control-plane sip.airshipit.org/scheduled=false
kubectl label baremetalhosts -n metal3 --overwrite rdm9r006o002 sip.airshipit.org/rack=r006 sip.airshipit.org/server=rdm9r006o002 vino.airshipit.org/flavor=control-plane sip.airshipit.org/scheduled=false
kubectl label baremetalhosts -n metal3 --overwrite rdm9r007o001 sip.airshipit.org/rack=r007 sip.airshipit.org/server=rdm9r007o001 vino.airshipit.org/flavor=control-plane sip.airshipit.org/scheduled=false
kubectl label baremetalhosts -n metal3 --overwrite rdm9r007o002 sip.airshipit.org/rack=r007 sip.airshipit.org/server=rdm9r007o002 vino.airshipit.org/flavor=worker sip.airshipit.org/scheduled=false
kubectl label baremetalhosts -n metal3 --overwrite rdm9r008c002 sip.airshipit.org/rack=r008 sip.airshipit.org/server=rdm9r008c002 vino.airshipit.org/flavor=worker sip.airshipit.org/scheduled=false
kubectl label baremetalhosts -n metal3 --overwrite rdm9r009c002 sip.airshipit.org/rack=r009 sip.airshipit.org/server=rdm9r009c002 vino.airshipit.org/flavor=worker sip.airshipit.org/scheduled=false
kubectl label baremetalhosts -n metal3 --overwrite rdm9r006o001 sip.airshipit.org/rack=r006 sip.airshipit.org/server=rdm9r006o001 vino.airshipit.org/flavor=control-plane
kubectl label baremetalhosts -n metal3 --overwrite rdm9r006o002 sip.airshipit.org/rack=r006 sip.airshipit.org/server=rdm9r006o002 vino.airshipit.org/flavor=control-plane
kubectl label baremetalhosts -n metal3 --overwrite rdm9r007o001 sip.airshipit.org/rack=r007 sip.airshipit.org/server=rdm9r007o001 vino.airshipit.org/flavor=control-plane
kubectl label baremetalhosts -n metal3 --overwrite rdm9r007o002 sip.airshipit.org/rack=r007 sip.airshipit.org/server=rdm9r007o002 vino.airshipit.org/flavor=worker
kubectl label baremetalhosts -n metal3 --overwrite rdm9r008c002 sip.airshipit.org/rack=r008 sip.airshipit.org/server=rdm9r008c002 vino.airshipit.org/flavor=worker
kubectl label baremetalhosts -n metal3 --overwrite rdm9r009c002 sip.airshipit.org/rack=r009 sip.airshipit.org/server=rdm9r009c002 vino.airshipit.org/flavor=worker

View File

@ -1,6 +1,6 @@
kubectl label baremetalhosts -n metal3 --overwrite rdm9r006o001 sip.airshipit.org/scheduled=false sip.airshipit.org/node-type- sip.airshipit.org/workload-cluster- scheduled-
kubectl label baremetalhosts -n metal3 --overwrite rdm9r006o002 sip.airshipit.org/scheduled=false sip.airshipit.org/node-type- sip.airshipit.org/workload-cluster- scheduled-
kubectl label baremetalhosts -n metal3 --overwrite rdm9r007o001 sip.airshipit.org/scheduled=false sip.airshipit.org/node-type- sip.airshipit.org/workload-cluster- scheduled-
kubectl label baremetalhosts -n metal3 --overwrite rdm9r007o002 sip.airshipit.org/scheduled=false sip.airshipit.org/node-type- sip.airshipit.org/workload-cluster- scheduled-
kubectl label baremetalhosts -n metal3 --overwrite rdm9r008c002 sip.airshipit.org/scheduled=false sip.airshipit.org/node-type- sip.airshipit.org/workload-cluster- scheduled-
kubectl label baremetalhosts -n metal3 --overwrite rdm9r009c002 sip.airshipit.org/scheduled=false sip.airshipit.org/node-type- sip.airshipit.org/workload-cluster- scheduled-
kubectl label baremetalhosts -n metal3 --overwrite rdm9r006o001 sip.airshipit.org/node-type- sip.airshipit.org/cluster-
kubectl label baremetalhosts -n metal3 --overwrite rdm9r006o002 sip.airshipit.org/node-type- sip.airshipit.org/cluster-
kubectl label baremetalhosts -n metal3 --overwrite rdm9r007o001 sip.airshipit.org/node-type- sip.airshipit.org/cluster-
kubectl label baremetalhosts -n metal3 --overwrite rdm9r007o002 sip.airshipit.org/node-type- sip.airshipit.org/cluster-
kubectl label baremetalhosts -n metal3 --overwrite rdm9r008c002 sip.airshipit.org/node-type- sip.airshipit.org/cluster-
kubectl label baremetalhosts -n metal3 --overwrite rdm9r009c002 sip.airshipit.org/node-type- sip.airshipit.org/cluster-

View File

@ -1,3 +1,3 @@
SCHEDULED=$1
SCHEDULED=$1 # "DoesNotExist" (for unscheduled) or "Exists" (for scheduled)
FLAVOR=$2
kubectl get baremetalhosts --all-namespaces -l sip.airshipit.org/scheduled=$SCHEDULED,vino.airshipit.org/flavor=$FLAVOR --show-labels|grep -v NAME|awk '{print "____________\n",$2,"\n\t",$5,$6}'|sed -e's/,/\n\t/g'
kubectl get baremetalhosts --all-namespaces -l sip.airshipit.org/cluster $SCHEDULED,vino.airshipit.org/flavor=$FLAVOR --show-labels|grep -v NAME|awk '{print "____________\n",$2,"\n\t",$5,$6}'|sed -e's/,/\n\t/g'

1
go.sum
View File

@ -821,6 +821,7 @@ k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCk
k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc=
k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apimachinery v0.20.4 h1:vhxQ0PPUUU2Ns1b9r4/UFp13UPs8cw2iOoTjnY9faa0=
k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg=
k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA=
k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=

View File

@ -31,6 +31,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
kerror "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
@ -59,21 +60,15 @@ const (
)
const (
BaseAirshipSelector = "sip.airshipit.org"
SipScheduleLabelName = "scheduled"
SipScheduleLabel = BaseAirshipSelector + "/" + SipScheduleLabelName
SipScheduled = SipScheduleLabel + "=true"
SipNotScheduled = SipScheduleLabel + "=false"
BaseAirshipSelector = "sip.airshipit.org"
// This is a placeholder . Need to synchronize with ViNO the constants below
// Probably pull this or equivalent values from a ViNO pkg
RackLabel = BaseAirshipSelector + "/rack"
ServerLabel = BaseAirshipSelector + "/server"
// This label is used to group the collection of scheduled BMHs
// Will represent the Tenant Cluster or Service Function Cluster
SipClusterLabelName = "workload-cluster"
// This label is applied to all BMHs scheduled to a given SIPCluster.
SipClusterLabelName = "cluster"
SipClusterLabel = BaseAirshipSelector + "/" + SipClusterLabelName
SipNodeTypeLabelName = "node-type"
@ -199,17 +194,15 @@ func (ml *MachineList) init(nodes map[airshipv1.BMHRole]airshipv1.NodeSet) {
func (ml *MachineList) getBMHs(c client.Client) (*metal3.BareMetalHostList, error) {
bmhList := &metal3.BareMetalHostList{}
// I am thinking we can add a Label for unsccheduled.
// SIP Cluster can change it to scheduled.
// We can then simple use this to select UNSCHEDULED
/*
This possible will not be needed if I figured out how to provide a != label.
Then we can use DOESNT HAVE A TENANT LABEL
*/
scheduleLabels := map[string]string{SipScheduleLabel: "false"}
// Select BMH not yet labeled as scheduled by SIP
unscheduledSelector := labels.NewSelector()
r, err := labels.NewRequirement(SipClusterLabel, selection.DoesNotExist, nil)
if err == nil {
unscheduledSelector = unscheduledSelector.Add(*r)
}
ml.Log.Info("Getting all available BaremetalHosts that are not scheduled")
err := c.List(context.Background(), bmhList, client.MatchingLabels(scheduleLabels))
err = c.List(context.Background(), bmhList, client.MatchingLabelsSelector{Selector: unscheduledSelector})
if err != nil {
ml.Log.Info("Received an error while getting BaremetalHost list", "error", err.Error())
return bmhList, err
@ -218,7 +211,7 @@ func (ml *MachineList) getBMHs(c client.Client) (*metal3.BareMetalHostList, erro
if len(bmhList.Items) > 0 {
return bmhList, nil
}
return bmhList, fmt.Errorf("Unable to identify BMH available for scheduling. Selecting %v ", scheduleLabels)
return bmhList, fmt.Errorf("Unable to identify BMH available for scheduling. Selecting %v ", unscheduledSelector)
}
func (ml *MachineList) identifyNodes(sip airshipv1.SIPCluster,
@ -277,7 +270,6 @@ func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.BMHRole
bmhList := &metal3.BareMetalHostList{}
scheduleLabels := map[string]string{
SipScheduleLabel: "true",
SipClusterLabel: clusterName,
SipNodeTypeLabel: string(nodeRole),
}
@ -749,7 +741,6 @@ func (ml *MachineList) ApplyLabels(sip airshipv1.SIPCluster, c client.Client) er
bmh := &machine.BMH
fmt.Printf("ApplyLabels bmh.ObjectMeta.Name:%s\n", bmh.ObjectMeta.Name)
bmh.Labels[SipClusterLabel] = GetClusterLabel(sip)
bmh.Labels[SipScheduleLabel] = "true"
bmh.Labels[SipNodeTypeLabel] = string(machine.BMHRole)
// This is bombing when it find 1 error
@ -773,7 +764,6 @@ func (ml *MachineList) RemoveLabels(c client.Client) error {
fmt.Printf("RemoveLabels bmh.ObjectMeta.Name:%s\n", bmh.ObjectMeta.Name)
delete(bmh.Labels, SipClusterLabel)
delete(bmh.Labels, SipNodeTypeLabel)
bmh.Labels[SipScheduleLabel] = "false"
// This is bombing when it find 1 error
// Might be better to acculumalte the errors, and
@ -793,8 +783,7 @@ func (ml *MachineList) GetCluster(sip airshipv1.SIPCluster, c client.Client) err
bmhList := &metal3.BareMetalHostList{}
scheduleLabels := map[string]string{
SipScheduleLabel: "true",
SipClusterLabel: GetClusterLabel(sip),
SipClusterLabel: GetClusterLabel(sip),
}
err := c.List(context.Background(), bmhList, client.MatchingLabels(scheduleLabels))

View File

@ -22,6 +22,8 @@ const (
var _ = Describe("MachineList", func() {
var machineList *MachineList
var err error
unscheduledSelector := testutil.UnscheduledSelector()
BeforeEach(func() {
nodes := map[string]*Machine{}
for n := 0; n < numNodes; n++ {
@ -55,8 +57,8 @@ var _ = Describe("MachineList", func() {
It("Should produce a list of unscheduled BMH objects", func() {
// "Schedule" two nodes
machineList.Machines["node00"].BMH.Labels[SipScheduleLabel] = "true"
machineList.Machines["node01"].BMH.Labels[SipScheduleLabel] = "true"
machineList.Machines["node00"].BMH.Labels[SipClusterLabel] = "subcluster-1"
machineList.Machines["node01"].BMH.Labels[SipClusterLabel] = "subcluster-1"
scheduledNodes := []metal3.BareMetalHost{
machineList.Machines["node00"].BMH,
machineList.Machines["node01"].BMH,
@ -75,7 +77,7 @@ var _ = Describe("MachineList", func() {
for _, bmh := range bmhList.Items {
for _, scheduled := range scheduledNodes {
Expect(bmh).ToNot(Equal(scheduled))
Expect(bmh.Labels[SipScheduleLabel]).To(Equal("false"))
Expect(testutil.CompareLabels(unscheduledSelector, bmh.Labels)).To(Succeed())
}
}
})
@ -84,7 +86,7 @@ var _ = Describe("MachineList", func() {
// "Schedule" all nodes
var objs []runtime.Object
for _, machine := range machineList.Machines {
machine.BMH.Labels[SipScheduleLabel] = "true"
machine.BMH.Labels[SipClusterLabel] = "subcluster-1"
objs = append(objs, &machine.BMH)
}

View File

@ -26,6 +26,7 @@ import (
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
@ -40,6 +41,8 @@ const (
var _ = Describe("SIPCluster controller", func() {
unscheduledSelector := testutil.UnscheduledSelector()
AfterEach(func() {
opts := []client.DeleteAllOfOption{client.InNamespace(testNamespace)}
Expect(k8sClient.DeleteAllOf(context.Background(), &metal3.BareMetalHost{}, opts...)).Should(Succeed())
@ -77,10 +80,8 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs until SIP has scheduled them to the SIP cluster
Eventually(func() error {
expectedLabels := map[string]string{
bmhpkg.SipScheduleLabel: "true",
bmhpkg.SipClusterLabel: bmhpkg.GetClusterLabel(*sipCluster),
}
expectedLabels := labels.SelectorFromSet(
map[string]string{bmhpkg.SipClusterLabel: bmhpkg.GetClusterLabel(*sipCluster)})
var bmh metal3.BareMetalHost
for node := range nodes {
@ -90,7 +91,7 @@ var _ = Describe("SIPCluster controller", func() {
}, &bmh)).Should(Succeed())
}
return compareLabels(expectedLabels, bmh.GetLabels())
return testutil.CompareLabels(expectedLabels, bmh.GetLabels())
}, 30, 5).Should(Succeed())
})
@ -114,9 +115,7 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs and validate they are not scheduled
Consistently(func() error {
expectedLabels := map[string]string{
bmhpkg.SipScheduleLabel: "false",
}
expectedLabels := unscheduledSelector
var bmh metal3.BareMetalHost
for node := range nodes {
@ -126,7 +125,7 @@ var _ = Describe("SIPCluster controller", func() {
}, &bmh)).Should(Succeed())
}
return compareLabels(expectedLabels, bmh.GetLabels())
return testutil.CompareLabels(expectedLabels, bmh.GetLabels())
}, 30, 5).Should(Succeed())
// Validate SIP CR ready condition has been updated
@ -161,9 +160,7 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs and validate they are not scheduled
Consistently(func() error {
expectedLabels := map[string]string{
bmhpkg.SipScheduleLabel: "false",
}
expectedLabels := unscheduledSelector
var bmh metal3.BareMetalHost
for node := range nodes {
@ -173,7 +170,7 @@ var _ = Describe("SIPCluster controller", func() {
}, &bmh)).Should(Succeed())
}
return compareLabels(expectedLabels, bmh.GetLabels())
return testutil.CompareLabels(expectedLabels, bmh.GetLabels())
}, 30, 5).Should(Succeed())
// Validate SIP CR ready condition has been updated
@ -224,9 +221,7 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs and validate they are not scheduled
Consistently(func() error {
expectedLabels := map[string]string{
bmhpkg.SipScheduleLabel: "false",
}
expectedLabels := unscheduledSelector
var bmh metal3.BareMetalHost
for node := range nodes {
@ -236,7 +231,7 @@ var _ = Describe("SIPCluster controller", func() {
}, &bmh)).Should(Succeed())
}
return compareLabels(expectedLabels, bmh.GetLabels())
return testutil.CompareLabels(expectedLabels, bmh.GetLabels())
}, 30, 5).Should(Succeed())
// Validate SIP CR ready condition has been updated
@ -286,9 +281,7 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs and validate they are not scheduled
Consistently(func() error {
expectedLabels := map[string]string{
bmhpkg.SipScheduleLabel: "false",
}
expectedLabels := unscheduledSelector
var bmh metal3.BareMetalHost
for node := range nodes {
@ -298,7 +291,7 @@ var _ = Describe("SIPCluster controller", func() {
}, &bmh)).Should(Succeed())
}
return compareLabels(expectedLabels, bmh.GetLabels())
return testutil.CompareLabels(expectedLabels, bmh.GetLabels())
}, 30, 5).Should(Succeed())
// Validate SIP CR ready condition has been updated
@ -356,9 +349,7 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs and validate they are not scheduled
Consistently(func() error {
expectedLabels := map[string]string{
bmhpkg.SipScheduleLabel: "false",
}
expectedLabels := unscheduledSelector
var bmh metal3.BareMetalHost
for node := range nodes {
@ -368,7 +359,7 @@ var _ = Describe("SIPCluster controller", func() {
}, &bmh)).Should(Succeed())
}
return compareLabels(expectedLabels, bmh.GetLabels())
return testutil.CompareLabels(expectedLabels, bmh.GetLabels())
}, 30, 5).Should(Succeed())
// Validate SIP CR ready condition has been updated
@ -423,9 +414,7 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs and validate they are not scheduled
Consistently(func() error {
expectedLabels := map[string]string{
bmhpkg.SipScheduleLabel: "false",
}
expectedLabels := unscheduledSelector
var bmh metal3.BareMetalHost
for node := range nodes {
@ -435,7 +424,7 @@ var _ = Describe("SIPCluster controller", func() {
}, &bmh)).Should(Succeed())
}
return compareLabels(expectedLabels, bmh.GetLabels())
return testutil.CompareLabels(expectedLabels, bmh.GetLabels())
}, 30, 5).Should(Succeed())
// Validate SIP CR ready condition has been updated
@ -452,19 +441,3 @@ var _ = Describe("SIPCluster controller", func() {
})
})
})
// compareLabels verifies that every key/value pair in expected appears in
// actual with an identical value. It returns nil when all expected labels
// are satisfied, and a descriptive error for the first label that is either
// missing from actual or carries a different value.
func compareLabels(expected map[string]string, actual map[string]string) error {
	for key, want := range expected {
		got, ok := actual[key]
		switch {
		case !ok:
			// Expected label absent entirely.
			return fmt.Errorf("label %s=%s missing. Has labels %v", key, want, actual)
		case got != want:
			// Label present but with the wrong value.
			return fmt.Errorf("label %s=%s does not match expected label %s=%s. Has labels %v", key, got, key,
				want, actual)
		}
	}
	return nil
}

View File

@ -3,9 +3,13 @@ package testutil
import (
"fmt"
"github.com/onsi/gomega"
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
airshipv1 "sipcluster/pkg/api/v1"
)
@ -15,11 +19,18 @@ var bmhRoleToLabelValue = map[airshipv1.BMHRole]string{
airshipv1.RoleWorker: "worker",
}
// UnscheduledSelector returns a label selector that matches BareMetalHosts
// not yet scheduled to any SIP cluster, i.e. hosts that do NOT carry the
// sip.airshipit.org/cluster label.
func UnscheduledSelector() labels.Selector {
	req, err := labels.NewRequirement(sipClusterLabel, selection.DoesNotExist, nil)
	// Requirement construction only fails on invalid input, which would be a
	// programming error in this test helper.
	gomega.Expect(err).Should(gomega.Succeed())
	return labels.NewSelector().Add(*req)
}
// NOTE(aw442m): These constants have been redefined from the bmh package in order to avoid an import cycle.
const (
sipRackLabel = "sip.airshipit.org/rack"
sipScheduleLabel = "sip.airshipit.org/scheduled"
sipServerLabel = "sip.airshipit.org/server"
sipRackLabel = "sip.airshipit.org/rack"
sipClusterLabel = "sip.airshipit.org/cluster"
sipServerLabel = "sip.airshipit.org/server"
bmhLabel = "example.org/bmh-label"
@ -181,10 +192,9 @@ func CreateBMH(node int, namespace string, role airshipv1.BMHRole, rack int) (*m
Name: fmt.Sprintf("node0%d", node),
Namespace: namespace,
Labels: map[string]string{
bmhLabel: bmhRoleToLabelValue[role],
sipScheduleLabel: "false",
sipRackLabel: rackLabel,
sipServerLabel: fmt.Sprintf("stl2%so%d", rackLabel, node),
bmhLabel: bmhRoleToLabelValue[role],
sipRackLabel: rackLabel,
sipServerLabel: fmt.Sprintf("stl2%so%d", rackLabel, node),
},
},
Spec: metal3.BareMetalHostSpec{
@ -303,3 +313,11 @@ func CreateBMCAuthSecret(nodeName string, namespace string, username string, pas
},
}
}
// CompareLabels checks the actual label map against the expected selector.
// It returns nil when the labels satisfy the selector, and an error
// describing both the selector and the labels otherwise.
func CompareLabels(expected labels.Selector, actual map[string]string) error {
	if expected.Matches(labels.Set(actual)) {
		return nil
	}
	return fmt.Errorf("labels do not match expected selector %v. Has labels %v", expected, actual)
}