From a3ac8ac7dcf6a843988907261285fcb2f2c233c5 Mon Sep 17 00:00:00 2001 From: Kostiantyn Kalynovskyi Date: Wed, 2 Dec 2020 17:31:57 -0600 Subject: [PATCH] Add spread topology instead of scheduling-constraint --- .../airship.airshipit.org_sipclusters.yaml | 14 ++- config/rbac/role.yaml | 9 ++ .../samples/airship_v1beta1_sipcluster.yaml | 4 +- pkg/api/v1/sipcluster_types.go | 10 +-- pkg/api/v1/zz_generated.deepcopy.go | 5 -- pkg/controllers/sipcluster_controller_test.go | 16 ++-- pkg/vbmh/errors.go | 11 ++- pkg/vbmh/machines.go | 89 ++++++++----------- pkg/vbmh/vbmh_test.go | 16 ++-- 9 files changed, 85 insertions(+), 89 deletions(-) diff --git a/config/crd/bases/airship.airshipit.org_sipclusters.yaml b/config/crd/bases/airship.airshipit.org_sipclusters.yaml index 4c97696..73120ac 100644 --- a/config/crd/bases/airship.airshipit.org_sipclusters.yaml +++ b/config/crd/bases/airship.airshipit.org_sipclusters.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.2.5 + controller-gen.kubebuilder.io/version: (devel) creationTimestamp: null name: sipclusters.airship.airshipit.org spec: @@ -35,7 +35,7 @@ spec: description: SIPClusterSpec defines the desired state of SIPCluster properties: config: - description: SIPClusterSpec defines the desired state of SIPCluster + description: SipConfig defines the desired state of SIPCluster properties: cluster-name: description: Cluster NAme to be used for labeling vBMH @@ -85,13 +85,11 @@ spec: standby: type: integer type: object - scheduling-constraints: + spreadTopology: description: PlaceHolder until we define the real expected Implementation Scheduling define constraints the allows the SIP Scheduler to identify the required BMH's to allow CAPI to build a cluster - items: - type: string - type: array + type: string vm-flavor: description: VmFlavor is essentially a Flavor label identifying the type of Node that meets the 
construction reqirements @@ -117,5 +115,5 @@ status: acceptedNames: kind: "" plural: "" - conditions: [] - storedVersions: [] + conditions: null + storedVersions: null diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 9c560d9..c899040 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -26,3 +26,12 @@ rules: - get - patch - update +- apiGroups: + - metal3.io + resources: + - baremetalhosts + verbs: + - get + - list + - patch + - update diff --git a/config/samples/airship_v1beta1_sipcluster.yaml b/config/samples/airship_v1beta1_sipcluster.yaml index d148708..166fd62 100644 --- a/config/samples/airship_v1beta1_sipcluster.yaml +++ b/config/samples/airship_v1beta1_sipcluster.yaml @@ -11,13 +11,13 @@ spec: nodes: worker: vm-flavor: 'airshipit.org/vino-flavor=worker' - scheduling-constraints: ['per-node'] # Support dont'care option. + spreadTopology: 'per-node' # Support dont'care option. count: active: 2 #driven by capi node number standby: 1 #slew for upgrades etc master: vm-flavor: 'airshipit.org/vino-flavor=master' - scheduling-constraints: ['per-node','per-rack'] + spreadTopology: 'per-rack' count: active: 1 standby: 1 diff --git a/pkg/api/v1/sipcluster_types.go b/pkg/api/v1/sipcluster_types.go index 7ef4c1e..d6e41dc 100644 --- a/pkg/api/v1/sipcluster_types.go +++ b/pkg/api/v1/sipcluster_types.go @@ -43,7 +43,7 @@ type SIPClusterSpec struct { InfraServices map[InfraService]InfraConfig `json:"infra"` } -// SIPClusterSpec defines the desired state of SIPCluster +// SipConfig defines the desired state of SIPCluster type SipConfig struct { // Cluster NAme to be used for labeling vBMH ClusterName string `json:"cluster-name,omitempty"` @@ -84,20 +84,20 @@ type NodeSet struct { // Implementation // Scheduling define constraints the allows the SIP Scheduler // to identify the required BMH's to allow CAPI to build a cluster - Scheduling []SchedulingOptions `json:"scheduling-constraints,omitempty"` + Scheduling SpreadTopology 
`json:"spreadTopology,omitempty"` // Count defines the scale expectations for the Nodes Count *VmCount `json:"count,omitempty"` } -type SchedulingOptions string +type SpreadTopology string // Possible Node or VM Roles for a Tenant const ( // RackAntiAffinity means the state is unknown - RackAntiAffinity SchedulingOptions = "per-rack" + RackAntiAffinity SpreadTopology = "per-rack" // ServerAntiAffinity means the state is unknown - ServerAntiAffinity SchedulingOptions = "per-node" + ServerAntiAffinity SpreadTopology = "per-node" ) type InfraConfig struct { diff --git a/pkg/api/v1/zz_generated.deepcopy.go b/pkg/api/v1/zz_generated.deepcopy.go index 69584d7..7c4e7ec 100644 --- a/pkg/api/v1/zz_generated.deepcopy.go +++ b/pkg/api/v1/zz_generated.deepcopy.go @@ -59,11 +59,6 @@ func (in *InfraConfig) DeepCopy() *InfraConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeSet) DeepCopyInto(out *NodeSet) { *out = *in - if in.Scheduling != nil { - in, out := &in.Scheduling, &out.Scheduling - *out = make([]SchedulingOptions, len(*in)) - copy(*out, *in) - } if in.Count != nil { in, out := &in.Count, &out.Count *out = new(VmCount) diff --git a/pkg/controllers/sipcluster_controller_test.go b/pkg/controllers/sipcluster_controller_test.go index bad059a..54ee9c9 100644 --- a/pkg/controllers/sipcluster_controller_test.go +++ b/pkg/controllers/sipcluster_controller_test.go @@ -219,21 +219,17 @@ func createSIPCluster(name string, namespace string, masters int, workers int) * ClusterName: name, }, Nodes: map[airshipv1.VmRoles]airshipv1.NodeSet{ - airshipv1.VmMaster: airshipv1.NodeSet{ - VmFlavor: "airshipit.org/vino-flavor=master", - Scheduling: []airshipv1.SchedulingOptions{ - airshipv1.ServerAntiAffinity, - }, + airshipv1.VmMaster: { + VmFlavor: "airshipit.org/vino-flavor=master", + Scheduling: airshipv1.ServerAntiAffinity, Count: &airshipv1.VmCount{ Active: masters, Standby: 0, }, }, - 
airshipv1.VmWorker: airshipv1.NodeSet{ - VmFlavor: "airshipit.org/vino-flavor=worker", - Scheduling: []airshipv1.SchedulingOptions{ - airshipv1.ServerAntiAffinity, - }, + airshipv1.VmWorker: { + VmFlavor: "airshipit.org/vino-flavor=worker", + Scheduling: airshipv1.ServerAntiAffinity, Count: &airshipv1.VmCount{ Active: workers, Standby: 0, diff --git a/pkg/vbmh/errors.go b/pkg/vbmh/errors.go index 5fd9140..04cdf71 100644 --- a/pkg/vbmh/errors.go +++ b/pkg/vbmh/errors.go @@ -5,7 +5,7 @@ import ( airshipv1 "sipcluster/pkg/api/v1" ) -// ErrAuthTypeNotSupported is returned when wrong AuthType is provided +// ErrorConstraintNotFound is returned when no scheduling constraint is found type ErrorConstraintNotFound struct { } @@ -32,3 +32,12 @@ type ErrorHostIpNotFound struct { func (e ErrorHostIpNotFound) Error() string { return fmt.Sprintf("Unable to identify the vBMH Host %v IP address on interface %v required by Infrastructure Service %v %s ", e.HostName, e.IPInterface, e.ServiceName, e.Message) } + +// ErrorUknownSpreadTopology is returned when an unknown spread topology is provided +type ErrorUknownSpreadTopology struct { + Topology airshipv1.SpreadTopology +} + +func (e ErrorUknownSpreadTopology) Error() string { + return fmt.Sprintf("Unknown spread topology '%s'", e.Topology) +} diff --git a/pkg/vbmh/machines.go b/pkg/vbmh/machines.go index 7237c4c..02cea74 100644 --- a/pkg/vbmh/machines.go +++ b/pkg/vbmh/machines.go @@ -239,34 +239,26 @@ func (ml *MachineList) identifyNodes(sip airshipv1.SIPCluster, bmhList *metal3.B return nil } -func (ml *MachineList) initScheduleMaps(role airshipv1.VmRoles, constraints []airshipv1.SchedulingOptions) (map[airshipv1.SchedulingOptions]*ScheduleSet, error) { - logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName, "role", role) - setMap := make(map[airshipv1.SchedulingOptions]*ScheduleSet) - for _, constraint := range constraints { - logger := logger.WithValues("constraint", constraint) - var labelName string - switch constraint { - case
airshipv1.RackAntiAffinity: - labelName = RackLabel - case airshipv1.ServerAntiAffinity: - labelName = ServerLabel - default: - logger.Info("constraint not supported") - continue - } - - logger.Info("Marking constraint as active") - setMap[constraint] = &ScheduleSet{ - active: true, - set: make(map[string]bool), - labelName: labelName, - } +func (ml *MachineList) initScheduleMaps(role airshipv1.VmRoles, constraint airshipv1.SpreadTopology) (*ScheduleSet, error) { + logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName, "role", role, "spread topology", constraint) + var labelName string + switch constraint { + case airshipv1.RackAntiAffinity: + labelName = RackLabel + case airshipv1.ServerAntiAffinity: + labelName = ServerLabel + default: + logger.Info("constraint not supported") + return nil, ErrorUknownSpreadTopology{Topology: constraint} } - if len(setMap) > 0 { - return setMap, nil - } - return setMap, ErrorConstraintNotFound{} + logger.Info("Marking constraint as active") + return &ScheduleSet{ + active: true, + set: make(map[string]bool), + labelName: labelName, + }, nil + } func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VmRoles, c client.Client, sipCfg *airshipv1.SipConfig) int { @@ -308,7 +300,7 @@ func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VmRoles } func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.NodeSet, bmList *metal3.BareMetalHostList, - scheduleSetMap map[airshipv1.SchedulingOptions]*ScheduleSet, c client.Client, sipCfg *airshipv1.SipConfig) error { + scheduleSet *ScheduleSet, c client.Client, sipCfg *airshipv1.SipConfig) error { logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName, "role", nodeRole) validBmh := true // Count the expectations stated in the CR @@ -328,30 +320,27 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1. 
if !ml.hasMachine(bmh) { logger.Info("BaremetalHost not yet marked as ready to be scheduled") - for _, constraint := range nodeCfg.Scheduling { - // Do I care about this constraint - logger := logger.WithValues("constraint", constraint) - scheduleRule := scheduleSetMap[constraint] - if scheduleRule.Active() { - logger.Info("constraint is active") - // Check if bmh has the label - bmhConstraintCondition, flavorMatch := scheduleRule.GetLabels(bmh.Labels, nodeCfg.VmFlavor) - logger.Info("Checked BMH constraint condition and flavor match", - "constraint condition", bmhConstraintCondition, - "flavor match", flavorMatch) - validBmh = flavorMatch - // If it does match the flavor - if bmhConstraintCondition != "" && flavorMatch { - // If its in the list already for the constraint , theen this bmh is disqualified. Skip it - if scheduleRule.Exists(bmhConstraintCondition) { - logger.Info("Constraint slot is alrady taken some BMH from this constraint is already allocated, skipping it") - validBmh = false - break - } else { - scheduleRule.Add(bmhConstraintCondition) - } + constraint := nodeCfg.Scheduling + // Do I care about this constraint + logger := logger.WithValues("constraint", constraint) + if scheduleSet.Active() { + logger.Info("constraint is active") + // Check if bmh has the label + bmhConstraintCondition, flavorMatch := scheduleSet.GetLabels(bmh.Labels, nodeCfg.VmFlavor) + logger.Info("Checked BMH constraint condition and flavor match", + "constraint condition", bmhConstraintCondition, + "flavor match", flavorMatch) + validBmh = flavorMatch + // If it does match the flavor + if bmhConstraintCondition != "" && flavorMatch { + // If its in the list already for the constraint , theen this bmh is disqualified. 
Skip it + if scheduleSet.Exists(bmhConstraintCondition) { + logger.Info("Constraint slot is alrady taken some BMH from this constraint is already allocated, skipping it") + validBmh = false + break + } else { + scheduleSet.Add(bmhConstraintCondition) } - } } // All the constraints have been checked diff --git a/pkg/vbmh/vbmh_test.go b/pkg/vbmh/vbmh_test.go index 6a5667c..206546f 100644 --- a/pkg/vbmh/vbmh_test.go +++ b/pkg/vbmh/vbmh_test.go @@ -3,16 +3,16 @@ package vbmh import ( "fmt" + metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - mockClient "sigs.k8s.io/controller-runtime/pkg/client/fake" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" + mockClient "sigs.k8s.io/controller-runtime/pkg/client/fake" airshipv1 "sipcluster/pkg/api/v1" ) @@ -33,9 +33,9 @@ var _ = Describe("MachineList", func() { Namespace: "default", Labels: map[string]string{ "airshipit.org/vino-flavor": "master", - SipScheduleLabel: "false", - RackLabel: "r002", - ServerLabel: fmt.Sprintf("node0%dr002", n), + SipScheduleLabel: "false", + RackLabel: "r002", + ServerLabel: fmt.Sprintf("node0%dr002", n), }, }, Spec: metal3.BareMetalHostSpec{ @@ -51,11 +51,11 @@ var _ = Describe("MachineList", func() { machineList = &MachineList{ NamespacedName: types.NamespacedName{ - Name: "vbmh", + Name: "vbmh", Namespace: "default", }, Machines: nodes, - Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"), + Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"), } err := metal3.AddToScheme(scheme.Scheme)