Merge pull request #24 from drewwalters96/constraint-tests
Add scheduling constraint integration tests
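The new tests create BareMetalHost (vBMH) fixtures and a SIPCluster resource, then assert that the controller never applies the schedule label when a per-server or per-rack anti-affinity constraint cannot be satisfied. They do this with Gomega's Consistently, which re-runs the polled function for a whole window and fails if any sample errors. Below is a minimal, self-contained sketch of that idiom under plain go test rather than Ginkgo; TestNeverScheduled and the scheduled variable are hypothetical stand-ins for the BMH label lookup the real tests perform, not code from this patch.

package example

import (
	"fmt"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// TestNeverScheduled sketches the Consistently idiom used in the new tests:
// the polled function is re-run for the whole window (the real tests pass
// 30 and 5, which Gomega reads as 30s total with a 5s poll interval) and
// the assertion fails if any sample returns a non-nil error.
func TestNeverScheduled(t *testing.T) {
	g := gomega.NewWithT(t)

	scheduled := false // hypothetical stand-in for reading the BMH schedule label

	g.Consistently(func() error {
		if scheduled {
			return fmt.Errorf("host was scheduled but should not have been")
		}
		return nil
	}, 30*time.Second, 5*time.Second).Should(gomega.Succeed())
}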
commit 3b057bf64f
@@ -33,6 +33,10 @@ import (
 	"sipcluster/testutil"
 )
 
+const (
+	testNamespace = "default"
+)
+
 var _ = Describe("SIPCluster controller", func() {
 	Context("When it detects a new SIPCluster", func() {
 		It("Should schedule available nodes", func() {
@@ -40,16 +44,15 @@ var _ = Describe("SIPCluster controller", func() {
 
 			// Create vBMH test objects
 			nodes := []string{"master", "master", "master", "worker", "worker", "worker", "worker"}
-			namespace := "default"
 			for node, role := range nodes {
-				vBMH, networkData := testutil.CreateBMH(node, namespace, role, 6)
+				vBMH, networkData := testutil.CreateBMH(node, testNamespace, role, 6)
 				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
 				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
 			}
 
 			// Create SIP cluster
 			clusterName := "subcluster-test1"
-			sipCluster := testutil.CreateSIPCluster(clusterName, namespace, 3, 4)
+			sipCluster := testutil.CreateSIPCluster(clusterName, testNamespace, 3, 4)
 			Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())
 
 			// Poll BMHs until SIP has scheduled them to the SIP cluster
@@ -63,7 +66,7 @@ var _ = Describe("SIPCluster controller", func() {
 			for node := range nodes {
 				Expect(k8sClient.Get(context.Background(), types.NamespacedName{
 					Name:      fmt.Sprintf("node0%d", node),
-					Namespace: namespace,
+					Namespace: testNamespace,
 				}, &bmh)).Should(Succeed())
 			}
 
@@ -78,16 +81,15 @@ var _ = Describe("SIPCluster controller", func() {
 
 			// Create vBMH test objects
 			nodes := []string{"master", "master", "worker", "worker", "worker", "worker"}
-			namespace := "default"
 			for node, role := range nodes {
-				vBMH, networkData := testutil.CreateBMH(node, namespace, role, 6)
+				vBMH, networkData := testutil.CreateBMH(node, testNamespace, role, 6)
 				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
 				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
 			}
 
 			// Create SIP cluster
 			clusterName := "subcluster-test2"
-			sipCluster := testutil.CreateSIPCluster(clusterName, namespace, 3, 4)
+			sipCluster := testutil.CreateSIPCluster(clusterName, testNamespace, 3, 4)
 			Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())
 
 			// Poll BMHs and validate they are not scheduled
@@ -100,7 +102,7 @@ var _ = Describe("SIPCluster controller", func() {
 			for node := range nodes {
 				Expect(k8sClient.Get(context.Background(), types.NamespacedName{
 					Name:      fmt.Sprintf("node0%d", node),
-					Namespace: namespace,
+					Namespace: testNamespace,
 				}, &bmh)).Should(Succeed())
 			}
 
@@ -115,16 +117,16 @@ var _ = Describe("SIPCluster controller", func() {
 
 			// Create vBMH test objects
 			nodes := []string{"master", "master", "master", "worker", "worker"}
-			namespace := "default"
+			testNamespace := "default"
 			for node, role := range nodes {
-				vBMH, networkData := testutil.CreateBMH(node, namespace, role, 6)
+				vBMH, networkData := testutil.CreateBMH(node, testNamespace, role, 6)
 				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
 				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
 			}
 
 			// Create SIP cluster
-			clusterName := "subcluster-test3"
-			sipCluster := testutil.CreateSIPCluster(clusterName, namespace, 3, 4)
+			clusterName := "subcluster-test4"
+			sipCluster := testutil.CreateSIPCluster(clusterName, testNamespace, 3, 4)
 			Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())
 
 			// Poll BMHs and validate they are not scheduled
@@ -137,7 +139,7 @@ var _ = Describe("SIPCluster controller", func() {
 			for node := range nodes {
 				Expect(k8sClient.Get(context.Background(), types.NamespacedName{
 					Name:      fmt.Sprintf("node0%d", node),
-					Namespace: namespace,
+					Namespace: testNamespace,
 				}, &bmh)).Should(Succeed())
 			}
 
@@ -146,6 +148,233 @@ var _ = Describe("SIPCluster controller", func() {
 
 			cleanTestResources()
 		})
+
+		Context("With per-node scheduling", func() {
+			It("Should not schedule two worker nodes to the same server", func() {
+				By("Not labeling any nodes")
+
+				// Create vBMH test objects
+				var nodes []*metal3.BareMetalHost
+				baremetalServer := "r06o001"
+
+				vBMH, networkData := testutil.CreateBMH(0, testNamespace, "master", 6)
+				vBMH.Labels[vbmh.ServerLabel] = baremetalServer
+
+				nodes = append(nodes, vBMH)
+				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
+
+				vBMH, networkData = testutil.CreateBMH(1, testNamespace, "worker", 6)
+				vBMH.Labels[vbmh.ServerLabel] = baremetalServer
+
+				nodes = append(nodes, vBMH)
+				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
+
+				vBMH, networkData = testutil.CreateBMH(2, testNamespace, "worker", 6)
+				vBMH.Labels[vbmh.ServerLabel] = baremetalServer
+
+				nodes = append(nodes, vBMH)
+				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
+
+				// Create SIP cluster
+				clusterName := "subcluster-test5"
+				sipCluster := testutil.CreateSIPCluster(clusterName, testNamespace, 1, 2)
+				Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())
+
+				// Poll BMHs and validate they are not scheduled
+				Consistently(func() error {
+					expectedLabels := map[string]string{
+						vbmh.SipScheduleLabel: "false",
+					}
+
+					var bmh metal3.BareMetalHost
+					for node := range nodes {
+						Expect(k8sClient.Get(context.Background(), types.NamespacedName{
+							Name:      fmt.Sprintf("node0%d", node),
+							Namespace: testNamespace,
+						}, &bmh)).Should(Succeed())
+					}
+
+					return compareLabels(expectedLabels, bmh.GetLabels())
+				}, 30, 5).Should(Succeed())
+
+				cleanTestResources()
+			})
+
+			It("Should not schedule two master nodes to the same server", func() {
+				By("Not labeling any nodes")
+
+				// Create vBMH test objects
+				var nodes []*metal3.BareMetalHost
+				baremetalServer := "r06o001"
+
+				vBMH, networkData := testutil.CreateBMH(0, testNamespace, "master", 6)
+				vBMH.Labels[vbmh.ServerLabel] = baremetalServer
+
+				nodes = append(nodes, vBMH)
+				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
+
+				vBMH, networkData = testutil.CreateBMH(1, testNamespace, "master", 6)
+				vBMH.Labels[vbmh.ServerLabel] = baremetalServer
+
+				nodes = append(nodes, vBMH)
+				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
+
+				vBMH, networkData = testutil.CreateBMH(2, testNamespace, "worker", 6)
+				vBMH.Labels[vbmh.ServerLabel] = baremetalServer
+
+				nodes = append(nodes, vBMH)
+				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
+
+				// Create SIP cluster
+				clusterName := "subcluster-test6"
+				sipCluster := testutil.CreateSIPCluster(clusterName, testNamespace, 2, 1)
+				Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())
+
+				// Poll BMHs and validate they are not scheduled
+				Consistently(func() error {
+					expectedLabels := map[string]string{
+						vbmh.SipScheduleLabel: "false",
+					}
+
+					var bmh metal3.BareMetalHost
+					for node := range nodes {
+						Expect(k8sClient.Get(context.Background(), types.NamespacedName{
+							Name:      fmt.Sprintf("node0%d", node),
+							Namespace: testNamespace,
+						}, &bmh)).Should(Succeed())
+					}
+
+					return compareLabels(expectedLabels, bmh.GetLabels())
+				}, 30, 5).Should(Succeed())
+
+				cleanTestResources()
+			})
+		})
+
+		Context("With per-rack scheduling", func() {
+			It("Should not schedule two worker nodes to the same rack", func() {
+				By("Not labeling any nodes")
+
+				// Create vBMH test objects
+				var nodes []*metal3.BareMetalHost
+				testNamespace := "default"
+
+				vBMH, networkData := testutil.CreateBMH(0, testNamespace, "master", 6)
+
+				nodes = append(nodes, vBMH)
+				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
+
+				vBMH, networkData = testutil.CreateBMH(1, testNamespace, "worker", 6)
+
+				nodes = append(nodes, vBMH)
+				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
+
+				vBMH, networkData = testutil.CreateBMH(2, testNamespace, "worker", 6)
+
+				nodes = append(nodes, vBMH)
+				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
+
+				// Create SIP cluster
+				clusterName := "subcluster-test3"
+				sipCluster := testutil.CreateSIPCluster(clusterName, testNamespace, 1, 2)
+
+				masterSpec := sipCluster.Spec.Nodes[airshipv1.VMMaster]
+				masterSpec.Scheduling = airshipv1.RackAntiAffinity
+				sipCluster.Spec.Nodes[airshipv1.VMMaster] = masterSpec
+
+				workerSpec := sipCluster.Spec.Nodes[airshipv1.VMWorker]
+				workerSpec.Scheduling = airshipv1.RackAntiAffinity
+				sipCluster.Spec.Nodes[airshipv1.VMWorker] = workerSpec
+
+				Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())
+
+				// Poll BMHs and validate they are not scheduled
+				Consistently(func() error {
+					expectedLabels := map[string]string{
+						vbmh.SipScheduleLabel: "false",
+					}
+
+					var bmh metal3.BareMetalHost
+					for node := range nodes {
+						Expect(k8sClient.Get(context.Background(), types.NamespacedName{
+							Name:      fmt.Sprintf("node0%d", node),
+							Namespace: testNamespace,
+						}, &bmh)).Should(Succeed())
+					}
+
+					return compareLabels(expectedLabels, bmh.GetLabels())
+				}, 30, 5).Should(Succeed())
+
+				cleanTestResources()
+			})
+
+			It("Should not schedule two master nodes to the same rack", func() {
+				By("Not labeling any nodes")
+
+				// Create vBMH test objects
+				var nodes []*metal3.BareMetalHost
+
+				vBMH, networkData := testutil.CreateBMH(0, testNamespace, "master", 6)
+
+				nodes = append(nodes, vBMH)
+				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
+
+				vBMH, networkData = testutil.CreateBMH(1, testNamespace, "master", 6)
+
+				nodes = append(nodes, vBMH)
+				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
+
+				vBMH, networkData = testutil.CreateBMH(2, testNamespace, "worker", 6)
+
+				nodes = append(nodes, vBMH)
+				Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+				Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
+
+				// Create SIP cluster
+				clusterName := "subcluster-test3"
+				sipCluster := testutil.CreateSIPCluster(clusterName, testNamespace, 2, 1)
+
+				masterSpec := sipCluster.Spec.Nodes[airshipv1.VMMaster]
+				masterSpec.Scheduling = airshipv1.RackAntiAffinity
+				sipCluster.Spec.Nodes[airshipv1.VMMaster] = masterSpec
+
+				workerSpec := sipCluster.Spec.Nodes[airshipv1.VMWorker]
+				workerSpec.Scheduling = airshipv1.RackAntiAffinity
+				sipCluster.Spec.Nodes[airshipv1.VMWorker] = workerSpec
+
+				Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())
+
+				// Poll BMHs and validate they are not scheduled
+				Consistently(func() error {
+					expectedLabels := map[string]string{
+						vbmh.SipScheduleLabel: "false",
+					}
+
+					var bmh metal3.BareMetalHost
+					for node := range nodes {
+						Expect(k8sClient.Get(context.Background(), types.NamespacedName{
+							Name:      fmt.Sprintf("node0%d", node),
+							Namespace: testNamespace,
+						}, &bmh)).Should(Succeed())
+					}
+
+					return compareLabels(expectedLabels, bmh.GetLabels())
+				}, 30, 5).Should(Succeed())
+
+				cleanTestResources()
+			})
+		})
 	})
 })
 
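A note on the spec mutation in the per-rack tests above: Go map values of struct type are not addressable, so writing sipCluster.Spec.Nodes[airshipv1.VMMaster].Scheduling directly would not compile. That is why the tests read the node spec out of the map, mutate the copy, and store it back. A minimal sketch of the idiom follows; the SchedulingOptions/NodeSet types here are illustrative stand-ins, since the real airshipv1 type definitions are not part of this diff.

package example

// Illustrative types standing in for the SIPCluster node spec; the real
// airshipv1 definitions are not shown in this diff, so these are assumptions.
type SchedulingOptions string

const RackAntiAffinity SchedulingOptions = "per-rack"

type NodeSet struct {
	Scheduling SchedulingOptions
}

func main() {
	nodes := map[string]NodeSet{"master": {}}

	// nodes["master"].Scheduling = RackAntiAffinity // does not compile:
	// map values of struct type are not addressable in Go.

	// Hence the copy-modify-store dance used by the per-rack tests:
	spec := nodes["master"]
	spec.Scheduling = RackAntiAffinity
	nodes["master"] = spec
}

The remaining hunk is from a second file, apparently the test fixture constants; its path was not captured here.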
@@ -16,6 +16,8 @@ const (
 	sipScheduleLabel = "sip.airshipit.org/sip-scheduled"
 	sipServerLabel   = "sip.airshipit.org/server"
 
+	VinoFlavorLabel = "airshipit.org/vino-flavor"
+
 	networkDataContent = `
 {
 	"links": [
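The tests reference several helpers that lie outside the captured hunks: compareLabels, cleanTestResources, and the testutil.CreateBMH/CreateSIPCluster constructors. Note also that each Consistently closure calls compareLabels only on the bmh fetched in the final loop iteration, so only the last host's labels are actually compared. As a rough sketch, compareLabels plausibly verifies that every expected label is present with a matching value, something like the following (an inferred reconstruction under an assumed controllers package, not the committed implementation):

package controllers

import "fmt"

// compareLabels (sketch): returns nil only when every expected label is
// present in actual with a matching value. The real helper is defined
// elsewhere in the test suite; this is an assumption for illustration.
func compareLabels(expected map[string]string, actual map[string]string) error {
	for k, want := range expected {
		got, ok := actual[k]
		if !ok {
			return fmt.Errorf("label %q not found in actual labels %v", k, actual)
		}
		if got != want {
			return fmt.Errorf("label %q: got %q, expected %q", k, got, want)
		}
	}
	return nil
}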