Add spread topology instead of scheduling-constraint

This commit is contained in:
Kostiantyn Kalynovskyi 2020-12-02 17:31:57 -06:00
parent 1462a5327c
commit a3ac8ac7dc
9 changed files with 85 additions and 89 deletions

View File

@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition kind: CustomResourceDefinition
metadata: metadata:
annotations: annotations:
controller-gen.kubebuilder.io/version: v0.2.5 controller-gen.kubebuilder.io/version: (devel)
creationTimestamp: null creationTimestamp: null
name: sipclusters.airship.airshipit.org name: sipclusters.airship.airshipit.org
spec: spec:
@ -35,7 +35,7 @@ spec:
description: SIPClusterSpec defines the desired state of SIPCluster description: SIPClusterSpec defines the desired state of SIPCluster
properties: properties:
config: config:
description: SIPClusterSpec defines the desired state of SIPCluster description: SipConfig defines the desired state of SIPCluster
properties: properties:
cluster-name: cluster-name:
description: Cluster Name to be used for labeling vBMH description: Cluster Name to be used for labeling vBMH
@ -85,13 +85,11 @@ spec:
standby: standby:
type: integer type: integer
type: object type: object
scheduling-constraints: spreadTopology:
description: PlaceHolder until we define the real expected Implementation description: PlaceHolder until we define the real expected Implementation
Scheduling defines constraints that allow the SIP Scheduler to Scheduling defines constraints that allow the SIP Scheduler to
identify the required BMH's to allow CAPI to build a cluster identify the required BMH's to allow CAPI to build a cluster
items:
type: string type: string
type: array
vm-flavor: vm-flavor:
description: VmFlavor is essentially a Flavor label identifying description: VmFlavor is essentially a Flavor label identifying
the type of Node that meets the construction requirements the type of Node that meets the construction requirements
@ -117,5 +115,5 @@ status:
acceptedNames: acceptedNames:
kind: "" kind: ""
plural: "" plural: ""
conditions: [] conditions: null
storedVersions: [] storedVersions: null

View File

@ -26,3 +26,12 @@ rules:
- get - get
- patch - patch
- update - update
- apiGroups:
- metal3.io
resources:
- baremetalhosts
verbs:
- get
- list
- patch
- update

View File

@ -11,13 +11,13 @@ spec:
nodes: nodes:
worker: worker:
vm-flavor: 'airshipit.org/vino-flavor=worker' vm-flavor: 'airshipit.org/vino-flavor=worker'
scheduling-constraints: ['per-node'] # Support dont'care option. spreadTopology: 'per-node' # Support dont'care option.
count: count:
active: 2 #driven by capi node number active: 2 #driven by capi node number
standby: 1 #slew for upgrades etc standby: 1 #slew for upgrades etc
master: master:
vm-flavor: 'airshipit.org/vino-flavor=master' vm-flavor: 'airshipit.org/vino-flavor=master'
scheduling-constraints: ['per-node','per-rack'] spreadTopology: 'per-rack'
count: count:
active: 1 active: 1
standby: 1 standby: 1

View File

@ -43,7 +43,7 @@ type SIPClusterSpec struct {
InfraServices map[InfraService]InfraConfig `json:"infra"` InfraServices map[InfraService]InfraConfig `json:"infra"`
} }
// SIPClusterSpec defines the desired state of SIPCluster // SipConfig defines the desired state of SIPCluster
type SipConfig struct { type SipConfig struct {
// Cluster Name to be used for labeling vBMH // Cluster Name to be used for labeling vBMH
ClusterName string `json:"cluster-name,omitempty"` ClusterName string `json:"cluster-name,omitempty"`
@ -84,20 +84,20 @@ type NodeSet struct {
// Implementation // Implementation
// Scheduling defines constraints that allow the SIP Scheduler // Scheduling defines constraints that allow the SIP Scheduler
// to identify the required BMH's to allow CAPI to build a cluster // to identify the required BMH's to allow CAPI to build a cluster
Scheduling []SchedulingOptions `json:"scheduling-constraints,omitempty"` Scheduling SpreadTopology `json:"spreadTopology,omitempty"`
// Count defines the scale expectations for the Nodes // Count defines the scale expectations for the Nodes
Count *VmCount `json:"count,omitempty"` Count *VmCount `json:"count,omitempty"`
} }
type SchedulingOptions string type SpreadTopology string
// Possible Node or VM Roles for a Tenant // Possible Node or VM Roles for a Tenant
const ( const (
// RackAntiAffinity selects the per-rack spread topology // RackAntiAffinity selects the per-rack spread topology
RackAntiAffinity SchedulingOptions = "per-rack" RackAntiAffinity SpreadTopology = "per-rack"
// ServerAntiAffinity selects the per-node spread topology // ServerAntiAffinity selects the per-node spread topology
ServerAntiAffinity SchedulingOptions = "per-node" ServerAntiAffinity SpreadTopology = "per-node"
) )
type InfraConfig struct { type InfraConfig struct {

View File

@ -59,11 +59,6 @@ func (in *InfraConfig) DeepCopy() *InfraConfig {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSet) DeepCopyInto(out *NodeSet) { func (in *NodeSet) DeepCopyInto(out *NodeSet) {
*out = *in *out = *in
if in.Scheduling != nil {
in, out := &in.Scheduling, &out.Scheduling
*out = make([]SchedulingOptions, len(*in))
copy(*out, *in)
}
if in.Count != nil { if in.Count != nil {
in, out := &in.Count, &out.Count in, out := &in.Count, &out.Count
*out = new(VmCount) *out = new(VmCount)

View File

@ -219,21 +219,17 @@ func createSIPCluster(name string, namespace string, masters int, workers int) *
ClusterName: name, ClusterName: name,
}, },
Nodes: map[airshipv1.VmRoles]airshipv1.NodeSet{ Nodes: map[airshipv1.VmRoles]airshipv1.NodeSet{
airshipv1.VmMaster: airshipv1.NodeSet{ airshipv1.VmMaster: {
VmFlavor: "airshipit.org/vino-flavor=master", VmFlavor: "airshipit.org/vino-flavor=master",
Scheduling: []airshipv1.SchedulingOptions{ Scheduling: airshipv1.ServerAntiAffinity,
airshipv1.ServerAntiAffinity,
},
Count: &airshipv1.VmCount{ Count: &airshipv1.VmCount{
Active: masters, Active: masters,
Standby: 0, Standby: 0,
}, },
}, },
airshipv1.VmWorker: airshipv1.NodeSet{ airshipv1.VmWorker: {
VmFlavor: "airshipit.org/vino-flavor=worker", VmFlavor: "airshipit.org/vino-flavor=worker",
Scheduling: []airshipv1.SchedulingOptions{ Scheduling: airshipv1.ServerAntiAffinity,
airshipv1.ServerAntiAffinity,
},
Count: &airshipv1.VmCount{ Count: &airshipv1.VmCount{
Active: workers, Active: workers,
Standby: 0, Standby: 0,

View File

@ -5,7 +5,7 @@ import (
airshipv1 "sipcluster/pkg/api/v1" airshipv1 "sipcluster/pkg/api/v1"
) )
// ErrAuthTypeNotSupported is returned when wrong AuthType is provided // ErrorConstraintNotFound is returned when no matching scheduling constraint is found
type ErrorConstraintNotFound struct { type ErrorConstraintNotFound struct {
} }
@ -32,3 +32,12 @@ type ErrorHostIpNotFound struct {
func (e ErrorHostIpNotFound) Error() string { func (e ErrorHostIpNotFound) Error() string {
return fmt.Sprintf("Unable to identify the vBMH Host %v IP address on interface %v required by Infrastructure Service %v %s ", e.HostName, e.IPInterface, e.ServiceName, e.Message) return fmt.Sprintf("Unable to identify the vBMH Host %v IP address on interface %v required by Infrastructure Service %v %s ", e.HostName, e.IPInterface, e.ServiceName, e.Message)
} }
// ErrorUknownSpreadTopology is returned when a spread topology other than
// the supported per-rack / per-node values is provided.
type ErrorUknownSpreadTopology struct {
	// Topology is the unrecognized spread topology value that was rejected.
	Topology airshipv1.SpreadTopology
}

// Error implements the error interface, reporting the unrecognized topology value.
func (e ErrorUknownSpreadTopology) Error() string {
	return fmt.Sprintf("Unknown spread topology '%s'", e.Topology)
}

View File

@ -239,11 +239,8 @@ func (ml *MachineList) identifyNodes(sip airshipv1.SIPCluster, bmhList *metal3.B
return nil return nil
} }
func (ml *MachineList) initScheduleMaps(role airshipv1.VmRoles, constraints []airshipv1.SchedulingOptions) (map[airshipv1.SchedulingOptions]*ScheduleSet, error) { func (ml *MachineList) initScheduleMaps(role airshipv1.VmRoles, constraint airshipv1.SpreadTopology) (*ScheduleSet, error) {
logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName, "role", role) logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName, "role", role, "spread topology", constraint)
setMap := make(map[airshipv1.SchedulingOptions]*ScheduleSet)
for _, constraint := range constraints {
logger := logger.WithValues("constraint", constraint)
var labelName string var labelName string
switch constraint { switch constraint {
case airshipv1.RackAntiAffinity: case airshipv1.RackAntiAffinity:
@ -252,21 +249,16 @@ func (ml *MachineList) initScheduleMaps(role airshipv1.VmRoles, constraints []ai
labelName = ServerLabel labelName = ServerLabel
default: default:
logger.Info("constraint not supported") logger.Info("constraint not supported")
continue return nil, ErrorUknownSpreadTopology{Topology: constraint}
} }
logger.Info("Marking constraint as active") logger.Info("Marking constraint as active")
setMap[constraint] = &ScheduleSet{ return &ScheduleSet{
active: true, active: true,
set: make(map[string]bool), set: make(map[string]bool),
labelName: labelName, labelName: labelName,
} }, nil
}
if len(setMap) > 0 {
return setMap, nil
}
return setMap, ErrorConstraintNotFound{}
} }
func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VmRoles, c client.Client, sipCfg *airshipv1.SipConfig) int { func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VmRoles, c client.Client, sipCfg *airshipv1.SipConfig) int {
@ -308,7 +300,7 @@ func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VmRoles
} }
func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.NodeSet, bmList *metal3.BareMetalHostList, func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.NodeSet, bmList *metal3.BareMetalHostList,
scheduleSetMap map[airshipv1.SchedulingOptions]*ScheduleSet, c client.Client, sipCfg *airshipv1.SipConfig) error { scheduleSet *ScheduleSet, c client.Client, sipCfg *airshipv1.SipConfig) error {
logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName, "role", nodeRole) logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName, "role", nodeRole)
validBmh := true validBmh := true
// Count the expectations stated in the CR // Count the expectations stated in the CR
@ -328,14 +320,13 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.
if !ml.hasMachine(bmh) { if !ml.hasMachine(bmh) {
logger.Info("BaremetalHost not yet marked as ready to be scheduled") logger.Info("BaremetalHost not yet marked as ready to be scheduled")
for _, constraint := range nodeCfg.Scheduling { constraint := nodeCfg.Scheduling
// Do I care about this constraint // Do I care about this constraint
logger := logger.WithValues("constraint", constraint) logger := logger.WithValues("constraint", constraint)
scheduleRule := scheduleSetMap[constraint] if scheduleSet.Active() {
if scheduleRule.Active() {
logger.Info("constraint is active") logger.Info("constraint is active")
// Check if bmh has the label // Check if bmh has the label
bmhConstraintCondition, flavorMatch := scheduleRule.GetLabels(bmh.Labels, nodeCfg.VmFlavor) bmhConstraintCondition, flavorMatch := scheduleSet.GetLabels(bmh.Labels, nodeCfg.VmFlavor)
logger.Info("Checked BMH constraint condition and flavor match", logger.Info("Checked BMH constraint condition and flavor match",
"constraint condition", bmhConstraintCondition, "constraint condition", bmhConstraintCondition,
"flavor match", flavorMatch) "flavor match", flavorMatch)
@ -343,16 +334,14 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.
// If it does match the flavor // If it does match the flavor
if bmhConstraintCondition != "" && flavorMatch { if bmhConstraintCondition != "" && flavorMatch {
// If it is already in the list for the constraint, then this bmh is disqualified. Skip it // If it is already in the list for the constraint, then this bmh is disqualified. Skip it
if scheduleRule.Exists(bmhConstraintCondition) { if scheduleSet.Exists(bmhConstraintCondition) {
logger.Info("Constraint slot is already taken; some BMH from this constraint is already allocated, skipping it") logger.Info("Constraint slot is already taken; some BMH from this constraint is already allocated, skipping it")
validBmh = false validBmh = false
break break
} else { } else {
scheduleRule.Add(bmhConstraintCondition) scheduleSet.Add(bmhConstraintCondition)
} }
} }
}
} }
// All the constraints have been checked // All the constraints have been checked
// Only if its not in the list already // Only if its not in the list already

View File

@ -3,16 +3,16 @@ package vbmh
import ( import (
"fmt" "fmt"
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
mockClient "sigs.k8s.io/controller-runtime/pkg/client/fake"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime" ctrl "sigs.k8s.io/controller-runtime"
mockClient "sigs.k8s.io/controller-runtime/pkg/client/fake"
airshipv1 "sipcluster/pkg/api/v1" airshipv1 "sipcluster/pkg/api/v1"
) )