Add spread topology instead of scheduling-constraint

Kostiantyn Kalynovskyi 2020-12-02 17:31:57 -06:00
parent 1462a5327c
commit a3ac8ac7dc
9 changed files with 85 additions and 89 deletions
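In short, this commit replaces the per-NodeSet list of scheduling constraints with a single spread-topology value. Condensing the Go API change from the hunks below (field declarations as they appear in the types.go diff; comments added here for orientation):

// Before: a node set could carry several scheduling constraints at once.
Scheduling []SchedulingOptions `json:"scheduling-constraints,omitempty"`

// After: exactly one anti-affinity spread topology per node set.
Scheduling SpreadTopology `json:"spreadTopology,omitempty"`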

View File

@@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.5
controller-gen.kubebuilder.io/version: (devel)
creationTimestamp: null
name: sipclusters.airship.airshipit.org
spec:
@@ -35,7 +35,7 @@ spec:
description: SIPClusterSpec defines the desired state of SIPCluster
properties:
config:
description: SIPClusterSpec defines the desired state of SIPCluster
description: SipConfig defines the desired state of SIPCluster
properties:
cluster-name:
description: Cluster Name to be used for labeling vBMH
@@ -85,13 +85,11 @@ spec:
standby:
type: integer
type: object
scheduling-constraints:
spreadTopology:
description: PlaceHolder until we define the real expected Implementation.
Scheduling defines constraints that allow the SIP Scheduler to
identify the required BMHs so that CAPI can build a cluster
items:
type: string
type: array
type: string
vm-flavor:
description: VmFlavor is essentially a Flavor label identifying
the type of Node that meets the construction requirements
@@ -117,5 +115,5 @@ status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
conditions: null
storedVersions: null

View File

@@ -26,3 +26,12 @@ rules:
- get
- patch
- update
- apiGroups:
- metal3.io
resources:
- baremetalhosts
verbs:
- get
- list
- patch
- update
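The added rule grants the controller get/list/patch/update access to Metal3 BareMetalHost objects. If this ClusterRole is generated with controller-gen, the change would correspond to a kubebuilder RBAC marker on the reconciler roughly like the line below (the marker itself is not shown in this diff, so treat it as an assumption):

// +kubebuilder:rbac:groups=metal3.io,resources=baremetalhosts,verbs=get;list;patch;update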

View File

@@ -11,13 +11,13 @@ spec:
nodes:
worker:
vm-flavor: 'airshipit.org/vino-flavor=worker'
scheduling-constraints: ['per-node'] # Support dont'care option.
spreadTopology: 'per-node' # Support don't-care option.
count:
active: 2 #driven by capi node number
standby: 1 #slew for upgrades etc
master:
vm-flavor: 'airshipit.org/vino-flavor=master'
scheduling-constraints: ['per-node','per-rack']
spreadTopology: 'per-rack'
count:
active: 1
standby: 1

View File

@@ -43,7 +43,7 @@ type SIPClusterSpec struct {
InfraServices map[InfraService]InfraConfig `json:"infra"`
}
// SIPClusterSpec defines the desired state of SIPCluster
// SipConfig defines the desired state of SIPCluster
type SipConfig struct {
// Cluster Name to be used for labeling vBMH
ClusterName string `json:"cluster-name,omitempty"`
@@ -84,20 +84,20 @@ type NodeSet struct {
// Implementation
// Scheduling defines constraints that allow the SIP Scheduler
// to identify the required BMHs so that CAPI can build a cluster
Scheduling []SchedulingOptions `json:"scheduling-constraints,omitempty"`
Scheduling SpreadTopology `json:"spreadTopology,omitempty"`
// Count defines the scale expectations for the Nodes
Count *VmCount `json:"count,omitempty"`
}
type SchedulingOptions string
type SpreadTopology string
// Possible Node or VM Roles for a Tenant
const (
// RackAntiAffinity means the state is unknown
RackAntiAffinity SchedulingOptions = "per-rack"
RackAntiAffinity SpreadTopology = "per-rack"
// ServerAntiAffinity means the state is unknown
ServerAntiAffinity SchedulingOptions = "per-node"
ServerAntiAffinity SpreadTopology = "per-node"
)
type InfraConfig struct {

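With Scheduling now a single value, callers assign one SpreadTopology constant directly instead of building a slice. A minimal sketch, using the airshipv1 alias (import airshipv1 "sipcluster/pkg/api/v1", as elsewhere in this repo) and the worker values that appear in the sample manifest and the updated test fixture:

workers := airshipv1.NodeSet{
	VmFlavor:   "airshipit.org/vino-flavor=worker",
	Scheduling: airshipv1.ServerAntiAffinity, // serialized as "per-node"
	Count:      &airshipv1.VmCount{Active: 2, Standby: 1},
}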
View File

@@ -59,11 +59,6 @@ func (in *InfraConfig) DeepCopy() *InfraConfig {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSet) DeepCopyInto(out *NodeSet) {
*out = *in
if in.Scheduling != nil {
in, out := &in.Scheduling, &out.Scheduling
*out = make([]SchedulingOptions, len(*in))
copy(*out, *in)
}
if in.Count != nil {
in, out := &in.Count, &out.Count
*out = new(VmCount)

View File

@@ -219,21 +219,17 @@ func createSIPCluster(name string, namespace string, masters int, workers int) *
ClusterName: name,
},
Nodes: map[airshipv1.VmRoles]airshipv1.NodeSet{
airshipv1.VmMaster: airshipv1.NodeSet{
VmFlavor: "airshipit.org/vino-flavor=master",
Scheduling: []airshipv1.SchedulingOptions{
airshipv1.ServerAntiAffinity,
},
airshipv1.VmMaster: {
VmFlavor: "airshipit.org/vino-flavor=master",
Scheduling: airshipv1.ServerAntiAffinity,
Count: &airshipv1.VmCount{
Active: masters,
Standby: 0,
},
},
airshipv1.VmWorker: airshipv1.NodeSet{
VmFlavor: "airshipit.org/vino-flavor=worker",
Scheduling: []airshipv1.SchedulingOptions{
airshipv1.ServerAntiAffinity,
},
airshipv1.VmWorker: {
VmFlavor: "airshipit.org/vino-flavor=worker",
Scheduling: airshipv1.ServerAntiAffinity,
Count: &airshipv1.VmCount{
Active: workers,
Standby: 0,

View File

@@ -5,7 +5,7 @@ import (
airshipv1 "sipcluster/pkg/api/v1"
)
// ErrAuthTypeNotSupported is returned when wrong AuthType is provided
// ErrorConstraintNotFound is returned when no valid scheduling constraint is found
type ErrorConstraintNotFound struct {
}
@@ -32,3 +32,12 @@ type ErrorHostIpNotFound struct {
func (e ErrorHostIpNotFound) Error() string {
return fmt.Sprintf("Unable to identify the vBMH Host %v IP address on interface %v required by Infrastructure Service %v %s ", e.HostName, e.IPInterface, e.ServiceName, e.Message)
}
// ErrorUknownSpreadTopology is returned when an unsupported spread topology is requested
type ErrorUknownSpreadTopology struct {
Topology airshipv1.SpreadTopology
}
func (e ErrorUknownSpreadTopology) Error() string {
return fmt.Sprintf("Uknown spread topology '%s'", e.Topology)
}
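Since ErrorUknownSpreadTopology is a plain struct implementing error, a caller inside the vbmh package could distinguish it with errors.As from the standard library. A hypothetical sketch; the surrounding handling is illustrative and not part of this commit:

if _, err := ml.initScheduleMaps(role, nodeCfg.Scheduling); err != nil {
	var unknownTopo ErrorUknownSpreadTopology
	if errors.As(err, &unknownTopo) {
		// the CR requested a topology the scheduler does not recognize
	}
	return err
}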

View File

@@ -239,34 +239,26 @@ func (ml *MachineList) identifyNodes(sip airshipv1.SIPCluster, bmhList *metal3.B
return nil
}
func (ml *MachineList) initScheduleMaps(role airshipv1.VmRoles, constraints []airshipv1.SchedulingOptions) (map[airshipv1.SchedulingOptions]*ScheduleSet, error) {
logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName, "role", role)
setMap := make(map[airshipv1.SchedulingOptions]*ScheduleSet)
for _, constraint := range constraints {
logger := logger.WithValues("constraint", constraint)
var labelName string
switch constraint {
case airshipv1.RackAntiAffinity:
labelName = RackLabel
case airshipv1.ServerAntiAffinity:
labelName = ServerLabel
default:
logger.Info("constraint not supported")
continue
}
logger.Info("Marking constraint as active")
setMap[constraint] = &ScheduleSet{
active: true,
set: make(map[string]bool),
labelName: labelName,
}
func (ml *MachineList) initScheduleMaps(role airshipv1.VmRoles, constraint airshipv1.SpreadTopology) (*ScheduleSet, error) {
logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName, "role", role, "spread topology", constraint)
var labelName string
switch constraint {
case airshipv1.RackAntiAffinity:
labelName = RackLabel
case airshipv1.ServerAntiAffinity:
labelName = ServerLabel
default:
logger.Info("constraint not supported")
return nil, ErrorUknownSpreadTopology{Topology: constraint}
}
if len(setMap) > 0 {
return setMap, nil
}
return setMap, ErrorConstraintNotFound{}
logger.Info("Marking constraint as active")
return &ScheduleSet{
active: true,
set: make(map[string]bool),
labelName: labelName,
}, nil
}
func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VmRoles, c client.Client, sipCfg *airshipv1.SipConfig) int {
@@ -308,7 +300,7 @@ func (ml *MachineList) countScheduledAndTobeScheduled
}
func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.NodeSet, bmList *metal3.BareMetalHostList,
scheduleSetMap map[airshipv1.SchedulingOptions]*ScheduleSet, c client.Client, sipCfg *airshipv1.SipConfig) error {
scheduleSet *ScheduleSet, c client.Client, sipCfg *airshipv1.SipConfig) error {
logger := ml.Log.WithValues("SIPCluster", ml.NamespacedName, "role", nodeRole)
validBmh := true
// Count the expectations stated in the CR
@@ -328,30 +320,27 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VmRoles, nodeCfg airshipv1.
if !ml.hasMachine(bmh) {
logger.Info("BaremetalHost not yet marked as ready to be scheduled")
for _, constraint := range nodeCfg.Scheduling {
// Do I care about this constraint
logger := logger.WithValues("constraint", constraint)
scheduleRule := scheduleSetMap[constraint]
if scheduleRule.Active() {
logger.Info("constraint is active")
// Check if bmh has the label
bmhConstraintCondition, flavorMatch := scheduleRule.GetLabels(bmh.Labels, nodeCfg.VmFlavor)
logger.Info("Checked BMH constraint condition and flavor match",
"constraint condition", bmhConstraintCondition,
"flavor match", flavorMatch)
validBmh = flavorMatch
// If it does match the flavor
if bmhConstraintCondition != "" && flavorMatch {
// If it's already in the list for this constraint, then this BMH is disqualified. Skip it
if scheduleRule.Exists(bmhConstraintCondition) {
logger.Info("Constraint slot is alrady taken some BMH from this constraint is already allocated, skipping it")
validBmh = false
break
} else {
scheduleRule.Add(bmhConstraintCondition)
}
constraint := nodeCfg.Scheduling
// Do I care about this constraint
logger := logger.WithValues("constraint", constraint)
if scheduleSet.Active() {
logger.Info("constraint is active")
// Check if bmh has the label
bmhConstraintCondition, flavorMatch := scheduleSet.GetLabels(bmh.Labels, nodeCfg.VmFlavor)
logger.Info("Checked BMH constraint condition and flavor match",
"constraint condition", bmhConstraintCondition,
"flavor match", flavorMatch)
validBmh = flavorMatch
// If it does match the flavor
if bmhConstraintCondition != "" && flavorMatch {
// If it's already in the list for this constraint, then this BMH is disqualified. Skip it
if scheduleSet.Exists(bmhConstraintCondition) {
logger.Info("Constraint slot is alrady taken some BMH from this constraint is already allocated, skipping it")
validBmh = false
break
} else {
scheduleSet.Add(bmhConstraintCondition)
}
}
}
// All the constraints have been checked

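Net effect on the scheduling loop: each role now derives exactly one ScheduleSet from its spread topology (rack label or server label), and a host is skipped when its label value is already occupied. A simplified sketch of that flow, reusing only the helper names from this file:

set, err := ml.initScheduleMaps(nodeRole, nodeCfg.Scheduling)
if err != nil {
	return err
}
for _, bmh := range bmList.Items {
	condition, flavorMatch := set.GetLabels(bmh.Labels, nodeCfg.VmFlavor)
	if !flavorMatch || condition == "" || set.Exists(condition) {
		continue // wrong flavor, or this rack/server already holds a node of this role
	}
	set.Add(condition)
	// ... record bmh as scheduled for nodeRole
}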
View File

@@ -3,16 +3,16 @@ package vbmh
import (
"fmt"
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
mockClient "sigs.k8s.io/controller-runtime/pkg/client/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
mockClient "sigs.k8s.io/controller-runtime/pkg/client/fake"
airshipv1 "sipcluster/pkg/api/v1"
)
@@ -33,9 +33,9 @@ var _ = Describe("MachineList", func() {
Namespace: "default",
Labels: map[string]string{
"airshipit.org/vino-flavor": "master",
SipScheduleLabel: "false",
RackLabel: "r002",
ServerLabel: fmt.Sprintf("node0%dr002", n),
SipScheduleLabel: "false",
RackLabel: "r002",
ServerLabel: fmt.Sprintf("node0%dr002", n),
},
},
Spec: metal3.BareMetalHostSpec{
@@ -51,11 +51,11 @@ var _ = Describe("MachineList", func() {
machineList = &MachineList{
NamespacedName: types.NamespacedName{
Name: "vbmh",
Name: "vbmh",
Namespace: "default",
},
Machines: nodes,
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}
err := metal3.AddToScheme(scheme.Scheme)