Change NodePort to NodePorts

- Added Finalize entry point for Service
- Services Namespace named Sip.Spec.Config.ClusterName is created during
  SIPCluster deployment
- Services Namespace is deleted during SIPCluster deletion
- Some WIP with Service-specific flows
- Change sample SIPCluster to use NodePorts instead of NodePort
This commit is contained in:
Rodolfo Pacheco 2020-11-17 18:06:54 -05:00
parent 932baf0979
commit cca11d768e
9 changed files with 160 additions and 33 deletions

Binary file not shown.

View File

@@ -45,3 +45,15 @@ rules:
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- create
- delete
- update
- get
- list
- watch
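These new namespace permissions are the kind of rules kubebuilder normally regenerates from an RBAC marker on the reconciler; a minimal sketch of the marker that would produce them (its exact placement is an assumption, not shown in this commit):

// Assumed placement: next to the existing markers above SIPClusterReconciler's Reconcile.
//+kubebuilder:rbac:groups="",resources=namespaces,verbs=create;delete;update;get;list;watch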

View File

@@ -3,15 +3,17 @@ kind: SIPCluster
metadata:
name: sipcluster-test1
namespace: sipcluster-system
finalizers:
- sip.airship.airshipit.org/finalizer
spec:
config:
cluster-name: cname
cluster-name: subcluster-test1
nodes:
worker:
vm-flavor: 'airshipit.org/vino-flavor=worker'
scheduling-constraints: ['per-node'] # Support don't-care option.
count:
active: 1 #driven by capi node number
active: 2 #driven by capi node number
standby: 1 #slew for upgrades etc
master:
vm-flavor: 'airshipit.org/vino-flavor=master'
@@ -26,7 +28,10 @@ spec:
image: haproxy:foo
nodeLabels:
- airship-masters
nodePort: 7000
nodePorts:
- 7000
- 7001
- 7002
nodeInterfaceId: oam-ipv4
jumppod:
optional:
@@ -34,12 +39,14 @@ spec:
image: sshpod:foo
nodeLabels:
- airship-masters
nodePort: 7022
nodePorts:
- 7022
nodeInterfaceId: oam-ipv4
authpod:
image: sshpod:foo
nodeLabels:
- airship-masters
nodePort: 7022
nodePorts:
- 7023
nodeInterfaceId: oam-ipv4

View File

@@ -104,7 +104,7 @@ type InfraConfig struct {
OptionalData *OptsConfig `json:"optional,omitempty"`
Image string `json:"image,omitempty"`
NodeLabels map[string]string `json:"nodelabels,omitempty"`
NodePort int `json:"nodePort,omitempty"`
NodePorts []int `json:"nodePorts,omitempty"`
NodeInterface string `json:"nodeInterfaceId,omitempty"`
}
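The nodePorts list in the sample manifest above maps straight onto the new NodePorts slice through its json tag; a small illustrative round-trip (JSON shown because CRD YAML is converted to JSON before decoding; assumes "encoding/json", "fmt", and the airshipv1 package are imported, and is not part of this commit):

raw := []byte(`{"image":"haproxy:foo","nodePorts":[7000,7001,7002],"nodeInterfaceId":"oam-ipv4"}`)
var cfg airshipv1.InfraConfig
if err := json.Unmarshal(raw, &cfg); err != nil {
panic(err)
}
fmt.Println(cfg.NodePorts) // [7000 7001 7002]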
@@ -120,10 +120,10 @@ type VmRoles string
// Possible Node or VM Roles for a Tenant
const (
// VmMaster identifies the master node role
VmMaster VmRoles = "Master"
VmMaster VmRoles = "master"
// VmWorker identifies the worker node role
VmWorker VmRoles = "Worker"
VmWorker VmRoles = "worker"
)
// VmCount
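Lowercasing the role constants keeps them aligned with the lowercase master:/worker: keys used in the sample SIPCluster nodes map, so lookups keyed by the constants match the decoded spec; an illustrative check (not part of this commit):

// The nodes map decoded from the CR is keyed by the lowercase role names.
nodes := map[airshipv1.VmRoles]airshipv1.NodeSet{
"master": {},
"worker": {},
}
_, ok := nodes[airshipv1.VmWorker] // true now that VmWorker == "worker"
fmt.Println(ok)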

View File

@@ -39,6 +39,11 @@ func (in *InfraConfig) DeepCopyInto(out *InfraConfig) {
(*out)[key] = val
}
}
if in.NodePorts != nil {
in, out := &in.NodePorts, &out.NodePorts
*out = make([]int, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfraConfig.
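Because NodePorts is a slice, the regenerated DeepCopyInto must allocate a fresh backing array rather than alias the receiver's; a quick illustrative check (not part of this commit):

orig := airshipv1.InfraConfig{NodePorts: []int{7000, 7001}}
dup := orig.DeepCopy()
dup.NodePorts[0] = 9999        // mutate the copy...
fmt.Println(orig.NodePorts[0]) // ...the original still prints 7000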

View File

@@ -191,7 +191,7 @@ func (r *SIPClusterReconciler) deployInfra(sip airshipv1.SIPCluster, machines *a
}
// Lets deploy the Service
err = service.Deploy(machines, r.Client)
err = service.Deploy(sip, machines, r.Client)
if err != nil {
return err
}
@@ -211,7 +211,7 @@ finish should take care of any wrap-up tasks.
*/
func (r *SIPClusterReconciler) finish(sip airshipv1.SIPCluster, machines *airshipvms.MachineList) error {
// Label the vBMH's
// UnLabel the vBMH's
err := machines.ApplyLabels(sip, r.Client)
if err != nil {
return err
@@ -226,6 +226,23 @@ Deal with Deletion and Finalizers if any are needed
e.g. what are we doing with the labels on the vBMH's
**/
func (r *SIPClusterReconciler) finalize(sip airshipv1.SIPCluster) error {
for sName, sConfig := range sip.Spec.InfraServices {
// Instantiate
service, err := airshipsvc.NewService(sName, sConfig)
if err != nil {
return err
}
// Let's clean Service-specific stuff
err = service.Finalize(sip, r.Client)
if err != nil {
return err
}
}
// Clean up common service stuff
if err := airshipsvc.FinalizeCommon(sip, r.Client); err != nil {
return err
}
// 1- Let me retrieve all vBMH mapped for this SIP Cluster
// 2- Let me now select the ones that meet the scheduling criteria
// If I schedule successfully then
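The sample SIPCluster above now carries the sip.airship.airshipit.org/finalizer entry; a minimal sketch of how finalize might be invoked from Reconcile, assuming controller-runtime's controllerutil finalizer helpers and that the reconciler embeds client.Client as kubebuilder scaffolds (the actual wiring is not part of this commit):

const sipFinalizer = "sip.airship.airshipit.org/finalizer"

// Hypothetical wiring inside Reconcile, after the SIPCluster has been fetched into sip.
if !sip.ObjectMeta.DeletionTimestamp.IsZero() {
if controllerutil.ContainsFinalizer(&sip, sipFinalizer) {
if err := r.finalize(sip); err != nil {
return ctrl.Result{}, err
}
controllerutil.RemoveFinalizer(&sip, sipFinalizer)
if err := r.Update(context.Background(), &sip); err != nil {
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
}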

View File

@@ -15,10 +15,14 @@
package services
import (
"context"
"fmt"
airshipv1 "sipcluster/pkg/api/v1"
airshipvms "sipcluster/pkg/vbmh"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -30,8 +34,9 @@ import (
// Validate : will make sure that the deployment is successful
type InfrastructureService interface {
//
Deploy(*airshipvms.MachineList, client.Client) error
Deploy(airshipv1.SIPCluster, *airshipvms.MachineList, client.Client) error
Validate() error
Finalize(airshipv1.SIPCluster, client.Client) error
}
// Generic Service Factory
@@ -40,16 +45,46 @@ type Service struct {
config airshipv1.InfraConfig
}
func (s *Service) Deploy(machines *airshipvms.MachineList, c client.Client) error {
func (s *Service) Deploy(sip airshipv1.SIPCluster, machines *airshipvms.MachineList, c client.Client) error {
// do something, might decouple this a bit
// If the services are defined as a Helm Chart, then deploy might be simple
// Let's make sure that the namespace is in place.
// It will be named after the cluster.
if err := s.createNS(sip.Spec.Config.ClusterName, c); err != nil {
return err
}
// Take the data from the appropriate Machines
// Prepare the Config
fmt.Printf("Deploy Service:%v \n", s.serviceName)
return nil
}
func (s *Service) createNS(serviceNamespaceName string, c client.Client) error {
// Get Namespace
// If not found then create it
ns := &corev1.Namespace{}
// c is a created client.
err := c.Get(context.Background(), client.ObjectKey{
Name: serviceNamespaceName,
}, ns)
if err != nil {
serviceNamespace := &corev1.Namespace{
TypeMeta: metav1.TypeMeta{
APIVersion: corev1.SchemeGroupVersion.String(),
Kind: "Namespace",
},
ObjectMeta: metav1.ObjectMeta{
Name: serviceNamespaceName,
},
}
if err := c.Create(context.TODO(), serviceNamespace); err != nil {
return err
}
}
return nil
}
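// Hypothetical refinement, not part of this commit: createNS treats any Get error
// as "namespace missing". Checking explicitly for NotFound (via
// k8s.io/apimachinery/pkg/api/errors, imported here as apierrors) would avoid
// masking other errors:
err := c.Get(context.Background(), client.ObjectKey{Name: serviceNamespaceName}, ns)
if apierrors.IsNotFound(err) {
// fall through and create the Namespace as above
} else if err != nil {
return err
}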
func (s *Service) Validate() error {
// do something, might decouple this a bit
fmt.Printf("Validate Service:%v \n", s.serviceName)
@@ -58,6 +93,28 @@ func (s *Service) Validate() error {
}
func (s *Service) Finalize(sip airshipv1.SIPCluster, c client.Client) error {
return nil
}
func FinalizeCommon(sip airshipv1.SIPCluster, c client.Client) error {
serviceNamespace := &corev1.Namespace{
TypeMeta: metav1.TypeMeta{
APIVersion: corev1.SchemeGroupVersion.String(),
Kind: "Namespace",
},
ObjectMeta: metav1.ObjectMeta{
Name: sip.Spec.Config.ClusterName,
},
}
if err := c.Delete(context.TODO(), serviceNamespace); err != nil {
return err
}
return nil
}
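// Hypothetical tweak, not part of this commit: tolerating an already-deleted
// namespace keeps repeated finalize passes idempotent (apierrors is
// k8s.io/apimachinery/pkg/api/errors):
if err := c.Delete(context.TODO(), serviceNamespace); err != nil && !apierrors.IsNotFound(err) {
return err
}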
// Service Factory
func NewService(infraName airshipv1.InfraService, infraCfg airshipv1.InfraConfig) (InfrastructureService, error) {
if infraName == airshipv1.LoadBalancerService {

View File

@@ -15,13 +15,41 @@
package services
import (
"fmt"
airshipv1 "sipcluster/pkg/api/v1"
airshipvms "sipcluster/pkg/vbmh"
"sigs.k8s.io/controller-runtime/pkg/client"
)
type LoadBalancer struct {
Service
}
func (l *LoadBalancer) Deploy(sip airshipv1.SIPCluster, machines *airshipvms.MachineList, c client.Client) error {
// do something, might decouple this a bit
// If the services are defined as a Helm Chart, then deploy might be simple
// Take the data from the appropriate Machines
// Prepare the Config
if err := l.Service.Deploy(sip, machines, c); err != nil {
return err
}
err := l.Prepare(sip, machines, c)
if err != nil {
return err
}
return nil
}
func (l *LoadBalancer) Prepare(sip airshipv1.SIPCluster, machines *airshipvms.MachineList, c client.Client) error {
fmt.Printf("%s.Prepare machines:%s \n", l.Service.serviceName, machines)
for _, machine := range machines.Vbmhs {
if machine.VmRole == airshipv1.VmMaster {
fmt.Printf("%s.Prepare for machine:%s ip is %s\n", l.Service.serviceName, machine, machine.Data.IpOnInterface[sip.Spec.InfraServices[l.Service.serviceName].NodeInterface])
}
}
return nil
}
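// Hypothetical next step, not implemented in this commit: Prepare only logs the
// master IPs today; pairing them with the service's NodePorts would yield the
// endpoint list a load-balancer config needs.
iface := sip.Spec.InfraServices[l.Service.serviceName].NodeInterface
endpoints := []string{}
for _, machine := range machines.Vbmhs {
if machine.VmRole != airshipv1.VmMaster {
continue
}
ip := machine.Data.IpOnInterface[iface]
for _, port := range l.Service.config.NodePorts {
endpoints = append(endpoints, fmt.Sprintf("%s:%d", ip, port))
}
}
fmt.Printf("%s candidate endpoints: %v\n", l.Service.serviceName, endpoints)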
func newLoadBalancer(infraCfg airshipv1.InfraConfig) InfrastructureService {
return &LoadBalancer{
Service: Service{

View File

@@ -114,7 +114,8 @@ type MachineData struct {
// MachineList contains the list of Scheduled or ToBeScheduled machines
type MachineList struct {
bmhs map[string]*Machine
// ViNO BMH
Vbmhs map[string]*Machine
// Keep track of how many we have marked as scheduled.
Scheduled map[airshipv1.VmRoles]int
}
@@ -124,14 +125,14 @@ func (ml *MachineList) hasMachine(bmh metal3.BareMetalHost) bool {
if &bmh == nil {
return false
}
fmt.Printf("Schedule.hasMachine bmh.ObjectMeta.Name:%s ml.bmhs[bmh.ObjectMeta.Name] :%v , answer :%t \n", bmh.ObjectMeta.Name, ml.bmhs[bmh.ObjectMeta.Name], (ml.bmhs[bmh.ObjectMeta.Name] != nil))
return ml.bmhs[bmh.ObjectMeta.Name] != nil
fmt.Printf("Schedule.hasMachine bmh.ObjectMeta.Name:%s ml.Vbmhs[bmh.ObjectMeta.Name] :%v , answer :%t \n", bmh.ObjectMeta.Name, ml.Vbmhs[bmh.ObjectMeta.Name], (ml.Vbmhs[bmh.ObjectMeta.Name] != nil))
return ml.Vbmhs[bmh.ObjectMeta.Name] != nil
}
func (ml *MachineList) String() string {
var sb strings.Builder
for mName, machine := range ml.bmhs {
for mName, machine := range ml.Vbmhs {
sb.WriteString("[" + mName + "]:" + machine.String())
}
@@ -159,14 +160,14 @@ func (ml *MachineList) Schedule(sip airshipv1.SIPCluster, c client.Client) error
// If I get here the MachineList should have a selected set of Machine's
// They are in the ScheduleStatus of ToBeScheduled as well as the Role
//
fmt.Printf("Schedule ml.bmhs size:%d\n", len(ml.bmhs))
fmt.Printf("Schedule ml.Vbmhs size:%d\n", len(ml.Vbmhs))
return nil
}
func (ml *MachineList) init(nodes map[airshipv1.VmRoles]airshipv1.NodeSet) {
// Only Initialize 1st time
fmt.Printf("Schedule.init len(ml.bmhs):%d\n", len(ml.bmhs))
if len(ml.bmhs) == 0 {
fmt.Printf("Schedule.init len(ml.Vbmhs):%d\n", len(ml.Vbmhs))
if len(ml.Vbmhs) == 0 {
mlSize := 0
mlNodeTypes := 0
for _, nodeCfg := range nodes {
@@ -175,7 +176,7 @@ func (ml *MachineList) init(nodes map[airshipv1.VmRoles]airshipv1.NodeSet) {
}
//fmt.Printf("Schedule.init mlSize:%d\n", mlSize)
ml.Scheduled = make(map[airshipv1.VmRoles]int, mlNodeTypes)
ml.bmhs = make(map[string]*Machine, 0)
ml.Vbmhs = make(map[string]*Machine, 0)
}
}
@@ -226,7 +227,7 @@ func (ml *MachineList) identifyNodes(sip airshipv1.SIPCluster, bmhList *metal3.B
return err
}
}
fmt.Printf("Schedule.identifyNodes %s size:%d\n", ml.String(), len(ml.bmhs))
fmt.Printf("Schedule.identifyNodes %s size:%d\n", ml.String(), len(ml.Vbmhs))
return nil
}
@@ -278,7 +279,7 @@ func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VmRoles
for _, bmh := range bmhList.Items {
if !ml.hasMachine(bmh) {
// Add it to the list.
ml.bmhs[bmh.ObjectMeta.Name] = NewMachine(bmh, nodeRole, Scheduled)
ml.Vbmhs[bmh.ObjectMeta.Name] = NewMachine(bmh, nodeRole, Scheduled)
ml.Scheduled[nodeRole] = ml.Scheduled[nodeRole] + 1
}
}
@@ -297,7 +298,7 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.
// Reduce from the list of BMH's already scheduled and labeled with the Cluster Name
// Reduce from the number of Machines I have identified already to be Labeled
nodeTarget := (nodeCfg.Count.Active + nodeCfg.Count.Standby) - ml.countScheduledAndTobeScheduled(nodeRole, c, sipCfg)
fmt.Printf("Schedule.scheduleIt nodeRole:%v nodeTarget:%d nodeCfg.VmFlavor:%s ml.bmhs len:%d \n", nodeRole, nodeTarget, nodeCfg.VmFlavor, len(ml.bmhs))
fmt.Printf("Schedule.scheduleIt nodeRole:%v nodeTarget:%d nodeCfg.VmFlavor:%s ml.Vbmhs len:%d \n", nodeRole, nodeTarget, nodeCfg.VmFlavor, len(ml.Vbmhs))
// Nothing to schedule
if nodeTarget == 0 {
return nil
@@ -332,14 +333,14 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.
}
}
fmt.Printf("Schedule.scheduleIt validBmh:%t, bmh.ObjectMeta.Name:%s ml.bmhs len:%d\n", validBmh, bmh.ObjectMeta.Name, len(ml.bmhs))
fmt.Printf("Schedule.scheduleIt validBmh:%t, bmh.ObjectMeta.Name:%s ml.Vbmhs len:%d\n", validBmh, bmh.ObjectMeta.Name, len(ml.Vbmhs))
// All the constraints have been checked
// Only if its not in the list already
if validBmh {
// Lets add it to the list as a schedulable thing
ml.bmhs[bmh.ObjectMeta.Name] = NewMachine(bmh, nodeRole, ToBeScheduled)
ml.Vbmhs[bmh.ObjectMeta.Name] = NewMachine(bmh, nodeRole, ToBeScheduled)
ml.Scheduled[nodeRole] = ml.Scheduled[nodeRole] + 1
fmt.Printf("---------------\nSchedule.scheduleIt ADDED machine:%s \n", ml.bmhs[bmh.ObjectMeta.Name].String())
fmt.Printf("---------------\nSchedule.scheduleIt ADDED machine:%s \n", ml.Vbmhs[bmh.ObjectMeta.Name].String())
// TODO Probably should remove the bmh from the list so if there are other node targets they don't even take it into account
nodeTarget = nodeTarget - 1
if nodeTarget == 0 {
@@ -367,8 +368,8 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.
func (ml *MachineList) Extrapolate(sip airshipv1.SIPCluster, c client.Client) bool {
// Lets get the data for all selected BMH's.
extrapolateSuccess := true
fmt.Printf("Schedule.Extrapolate ml.bmhs:%d\n", len(ml.bmhs))
for _, machine := range ml.bmhs {
fmt.Printf("Schedule.Extrapolate ml.Vbmhs:%d\n", len(ml.Vbmhs))
for _, machine := range ml.Vbmhs {
fmt.Printf("Schedule.Extrapolate machine.Data.IpOnInterface len:%d machine:%v \n", len(machine.Data.IpOnInterface), machine)
// Skip if I already extrapolated the data for this machine
@@ -639,8 +640,8 @@ This is done only after the Infrastructure Services have been deployed
*/
func (ml *MachineList) ApplyLabels(sip airshipv1.SIPCluster, c client.Client) error {
fmt.Printf("ApplyLabels %s size:%d\n", ml.String(), len(ml.bmhs))
for _, machine := range ml.bmhs {
fmt.Printf("ApplyLabels %s size:%d\n", ml.String(), len(ml.Vbmhs))
for _, machine := range ml.Vbmhs {
// Only add labels to Machines that are marked ToBeScheduled (i.e., not scheduled yet)
if machine.ScheduleStatus == ToBeScheduled {
bmh := &machine.Bmh
@@ -667,8 +668,8 @@ RemoveLabels
*/
func (ml *MachineList) RemoveLabels(sip airshipv1.SIPCluster, c client.Client) error {
fmt.Printf("ApplyLabels %s size:%d\n", ml.String(), len(ml.bmhs))
for _, machine := range ml.bmhs {
fmt.Printf("ApplyLabels %s size:%d\n", ml.String(), len(ml.Vbmhs))
for _, machine := range ml.Vbmhs {
bmh := &machine.Bmh
fmt.Printf("RemoveLabels bmh.ObjectMeta.Name:%s\n", bmh.ObjectMeta.Name)
@@ -705,7 +706,7 @@ func (ml *MachineList) GetCluster(sip airshipv1.SIPCluster, c client.Client) err
}
for _, bmh := range bmhList.Items {
ml.bmhs[bmh.ObjectMeta.Name] = &Machine{
ml.Vbmhs[bmh.ObjectMeta.Name] = &Machine{
Bmh: bmh,
ScheduleStatus: Scheduled,
VmRole: airshipv1.VmRoles(bmh.Labels[SipNodeTypeLabel]),