diff --git a/README.md b/README.md
index 224c300..76e3b51 100644
--- a/README.md
+++ b/README.md
@@ -23,8 +23,8 @@ While ViNO is responsible for setting up VM infrastructure, such as:
- networking
- bmh objects, with labels:
* location - i.e. `rack: 8` and `node: rdm8r008c002` - should follow k8s semi-standard
- * vm role - i.e. `node-type: worker`
- * vm flavor - i.e `node-flavor: foobar`
+ * role - i.e. `node-type: worker`
+ * flavor - i.e. `node-flavor: foobar`
* networks - i.e. `networks: [foo, bar]`
and the details for ViNO can be found [here](https://hackmd.io/KSu8p4QeTc2kXIjlrso2eA)
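For orientation, a ViNO-created BMH might carry a label set like the sketch below (Go, values illustrative; the exact keys follow the list above, and the encoding of the networks list is ViNO's choice):

    // Illustrative only: labels ViNO might apply to a BMH it creates.
    bmhLabels := map[string]string{
        "rack":        "8",            // location
        "node":        "rdm8r008c002", // location
        "node-type":   "worker",       // role
        "node-flavor": "foobar",       // flavor
        // A networks list must be flattened into a valid label value;
        // the exact encoding is ViNO's choice.
        "networks": "foo.bar",
    }
    _ = bmhLabels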
diff --git a/config/crd/bases/airship.airshipit.org_sipclusters.yaml b/config/crd/bases/airship.airshipit.org_sipclusters.yaml
index 4d4d5e5..a939843 100644
--- a/config/crd/bases/airship.airshipit.org_sipclusters.yaml
+++ b/config/crd/bases/airship.airshipit.org_sipclusters.yaml
@@ -55,6 +55,50 @@ spec:
standby:
type: integer
type: object
+ labelSelector:
+ description: LabelSelector is the BMH label selector to use.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If
+ the operator is In or NotIn, the values array must
+ be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A
+ single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is "key",
+ the operator is "In", and the values array contains only
+ "value". The requirements are ANDed.
+ type: object
+ type: object
spreadTopology:
description: PlaceHolder until we define the real expected Implementation
Scheduling define constraints that allow the SIP Scheduler to
@@ -63,12 +107,8 @@ spec:
- PerRack
- PerHost
type: string
- vmFlavor:
- description: VMFlavor is essentially a Flavor label identifying
- the type of Node that meets the construction reqirements
- type: string
type: object
- description: Nodes defines the set of nodes to schedule for each vm
+ description: Nodes defines the set of nodes to schedule for each BMH
role.
type: object
services:
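The labelSelector added above is the standard Kubernetes meta/v1 LabelSelector, so the usual semantics apply: matchLabels and matchExpressions are ANDed, and an empty selector matches every BMH. A minimal, self-contained sketch of evaluating one (label keys and values illustrative):

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/labels"
    )

    func main() {
        sel := &metav1.LabelSelector{
            MatchLabels: map[string]string{"vino.airshipit.org/flavor": "control-plane"},
            MatchExpressions: []metav1.LabelSelectorRequirement{{
                Key:      "sip.airshipit.org/rack",
                Operator: metav1.LabelSelectorOpIn,
                Values:   []string{"r6", "r7"},
            }},
        }
        s, err := metav1.LabelSelectorAsSelector(sel)
        if err != nil {
            panic(err)
        }
        // Both clauses must hold for a BMH to match.
        fmt.Println(s.Matches(labels.Set{
            "vino.airshipit.org/flavor": "control-plane",
            "sip.airshipit.org/rack":    "r6",
        })) // true
    }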
diff --git a/config/samples/airship_v1beta1_sipcluster.yaml b/config/samples/airship_v1beta1_sipcluster.yaml
index d44f34a..bd5225b 100644
--- a/config/samples/airship_v1beta1_sipcluster.yaml
+++ b/config/samples/airship_v1beta1_sipcluster.yaml
@@ -8,13 +8,15 @@ metadata:
spec:
nodes:
ControlPlane:
- vmFlavor: vino.airshipit.org/flavor=control-plane
+ labelSelector:
+ vino.airshipit.org/flavor: control-plane
spreadTopology: PerRack
count:
active: 1
standby: 1
Worker:
- vmFlavor: vino.airshipit.org/flavor=worker
+ labelSelector:
+ vino.airshipit.org/flavor: worker
spreadTopology: PerHost
count:
active: 1
diff --git a/docs/api/sipcluster.md b/docs/api/sipcluster.md
index 018e46a..26eff59 100644
--- a/docs/api/sipcluster.md
+++ b/docs/api/sipcluster.md
@@ -40,6 +40,10 @@ bool
+
BMHRole
+(string
alias)
+BMHRole defines the states the provisioner will report
+the tenant as having.
JumpHostService
@@ -112,6 +116,49 @@ directory, and then configured as identity files in the SSH config file of the d
+
NodeCount
+
+
+(Appears on:
+NodeSet)
+
+NodeCount
+
NodeSet
@@ -139,14 +186,15 @@ Such as :
-vmFlavor
+labelSelector
-string
+
+Kubernetes meta/v1.LabelSelector
+
|
- VMFlavor is essentially a Flavor label identifying the
-type of Node that meets the construction reqirements
+LabelSelector is the BMH label selector to use.
|
@@ -169,8 +217,8 @@ to identify the required BMH’s to allow CAPI to build a cluster
count
-
-VMCount
+
+NodeCount
|
@@ -227,12 +275,12 @@ SIPClusterSpec
nodes
-map[./pkg/api/v1.VMRole]./pkg/api/v1.NodeSet
+map[./pkg/api/v1.BMHRole]./pkg/api/v1.NodeSet
- Nodes defines the set of nodes to schedule for each vm role.
+Nodes defines the set of nodes to schedule for each BMH role.
|
@@ -420,12 +468,12 @@ string
nodes
-map[./pkg/api/v1.VMRole]./pkg/api/v1.NodeSet
+map[./pkg/api/v1.BMHRole]./pkg/api/v1.NodeSet
- Nodes defines the set of nodes to schedule for each vm role.
+Nodes defines the set of nodes to schedule for each BMH role.
|
@@ -484,53 +532,6 @@ SIPClusterServices
(Appears on:
NodeSet)
-VMCount
-
-
-(Appears on:
-NodeSet)
-
-VMCount
-
-VMRole
-(string
alias)
-VMRole defines the states the provisioner will report
-the tenant has having.
This page was automatically generated with gen-crd-api-reference-docs
diff --git a/pkg/api/v1/sipcluster_types.go b/pkg/api/v1/sipcluster_types.go
index f87a543..4ee70f6 100644
--- a/pkg/api/v1/sipcluster_types.go
+++ b/pkg/api/v1/sipcluster_types.go
@@ -49,8 +49,8 @@ type SIPClusterSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make manifests to regenerate code after modifying this file
- // Nodes defines the set of nodes to schedule for each vm role.
- Nodes map[VMRole]NodeSet `json:"nodes,omitempty"`
+ // Nodes defines the set of nodes to schedule for each BMH role.
+ Nodes map[BMHRole]NodeSet `json:"nodes,omitempty"`
// Services defines the services that are deployed when a SIPCluster is provisioned.
Services SIPClusterServices `json:"services"`
@@ -109,7 +109,7 @@ const (
ReasonTypeProgressing string = "Progressing"
// ReasonTypeUnableToApplyLabels indicates that a resource has a specified condition because SIP was unable to
- // apply labels to vBMHs for the SIPCluster.
+ // apply labels to BMHs for the SIPCluster.
ReasonTypeUnableToApplyLabels string = "UnableToApplyLabels"
// ReasonTypeUnableToDecommission indicates that a resource has a specified condition because SIP was unable to
@@ -117,7 +117,7 @@ const (
ReasonTypeUnableToDecommission string = "UnableToDecommission"
// ReasonTypeUnschedulable indicates that a resource has a specified condition because SIP was unable to
- // schedule vBMHs for the SIPCluster.
+ // schedule BMHs for the SIPCluster.
ReasonTypeUnschedulable string = "Unschedulable"
// ReasonTypeReconciliationSucceeded indicates that a resource has a specified condition because SIP completed
@@ -137,16 +137,15 @@ const (
//
type NodeSet struct {
- // VMFlavor is essentially a Flavor label identifying the
- // type of Node that meets the construction reqirements
- VMFlavor string `json:"vmFlavor,omitempty"`
+ // LabelSelector is the BMH label selector to use.
+ LabelSelector metav1.LabelSelector `json:"labelSelector,omitempty"`
// PlaceHolder until we define the real expected
// Implementation
// Scheduling define constraints that allow the SIP Scheduler
// to identify the required BMH's to allow CAPI to build a cluster
Scheduling SpreadTopology `json:"spreadTopology,omitempty"`
// Count defines the scale expectations for the Nodes
- Count *VMCount `json:"count,omitempty"`
+ Count *NodeCount `json:"count,omitempty"`
}
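A sketch of a NodeSet literal under the new API (the helper name and label key are illustrative; HostAntiAffinity is this package's PerHost topology):

    // Illustrative only: select worker-flavored BMHs, spread them across
    // hosts, and request two active nodes plus one standby.
    func exampleWorkerNodeSet() NodeSet {
        return NodeSet{
            LabelSelector: metav1.LabelSelector{
                MatchLabels: map[string]string{"vino.airshipit.org/flavor": "worker"},
            },
            Scheduling: HostAntiAffinity,
            Count:      &NodeCount{Active: 2, Standby: 1},
        }
    }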
// +kubebuilder:validation:Enum=PerRack;PerHost
@@ -173,18 +172,18 @@ type BMCOpts struct {
Proxy bool `json:"proxy,omitempty"`
}
-// VMRole defines the states the provisioner will report
+// BMHRole defines the states the provisioner will report
// the tenant as having.
-type VMRole string
+type BMHRole string
-// Possible Node or VM Roles for a Tenant
+// Possible BMH Roles for a Tenant
const (
- VMControlPlane VMRole = "ControlPlane"
- VMWorker = "Worker"
+ RoleControlPlane BMHRole = "ControlPlane"
+ RoleWorker BMHRole = "Worker"
)
-// VMCount
-type VMCount struct {
+// NodeCount defines the scale expectations (active and standby) for a node role
+type NodeCount struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
Active int `json:"active,omitempty"`
diff --git a/pkg/api/v1/zz_generated.deepcopy.go b/pkg/api/v1/zz_generated.deepcopy.go
index c447709..6a731f9 100644
--- a/pkg/api/v1/zz_generated.deepcopy.go
+++ b/pkg/api/v1/zz_generated.deepcopy.go
@@ -66,12 +66,28 @@ func (in *JumpHostService) DeepCopy() *JumpHostService {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeCount) DeepCopyInto(out *NodeCount) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCount.
+func (in *NodeCount) DeepCopy() *NodeCount {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeCount)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSet) DeepCopyInto(out *NodeSet) {
*out = *in
+ in.LabelSelector.DeepCopyInto(&out.LabelSelector)
if in.Count != nil {
in, out := &in.Count, &out.Count
- *out = new(VMCount)
+ *out = new(NodeCount)
**out = **in
}
}
@@ -213,7 +229,7 @@ func (in *SIPClusterSpec) DeepCopyInto(out *SIPClusterSpec) {
*out = *in
if in.Nodes != nil {
in, out := &in.Nodes, &out.Nodes
- *out = make(map[VMRole]NodeSet, len(*in))
+ *out = make(map[BMHRole]NodeSet, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
@@ -252,18 +268,3 @@ func (in *SIPClusterStatus) DeepCopy() *SIPClusterStatus {
in.DeepCopyInto(out)
return out
}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *VMCount) DeepCopyInto(out *VMCount) {
- *out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMCount.
-func (in *VMCount) DeepCopy() *VMCount {
- if in == nil {
- return nil
- }
- out := new(VMCount)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/pkg/vbmh/machines.go b/pkg/bmh/bmh.go
similarity index 90%
rename from pkg/vbmh/machines.go
rename to pkg/bmh/bmh.go
index 31d0683..aabfb68 100644
--- a/pkg/vbmh/machines.go
+++ b/pkg/bmh/bmh.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package vbmh
+package bmh
import (
"context"
@@ -29,6 +29,8 @@ import (
"github.com/go-logr/logr"
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
kerror "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -36,12 +38,12 @@ import (
type ScheduledState string
-// Possible Node or VM Roles for a Tenant
+// Possible scheduling states for a BMH
const (
- // ToBeScheduled means that the VM was identified by the scheduler to be selected
+ // ToBeScheduled means that the BMH was identified by the scheduler to be selected
ToBeScheduled ScheduledState = "Selected"
- // Scheduled means the BMH / VM already has a label implying it
+ // Scheduled means the BMH already has a label implying it
// was previously scheduled
Scheduled ScheduledState = "Scheduled"
@@ -69,7 +71,7 @@ const (
RackLabel = BaseAirshipSelector + "/rack"
ServerLabel = BaseAirshipSelector + "/server"
- // Thislabekl is associated to group the colletcion of scheduled vBMH's
+ // This label is used to group the collection of scheduled BMH's
// Will represent the Tenant Cluster or Service Function Cluster
SipClusterLabelName = "workload-cluster"
SipClusterLabel = BaseAirshipSelector + "/" + SipClusterLabelName
@@ -93,7 +95,7 @@ type Machine struct {
// scheduleLabels
// I expect to build this over time / if not might not be needed
ScheduleLabels map[string]string
- VMRole airshipv1.VMRole
+ BMHRole airshipv1.BMHRole
// Data will contain whatever information is needed from the server
// If it ends up being just the IP then maybe we can collapse into a field
Data *MachineData
@@ -101,11 +103,11 @@ type Machine struct {
func (m *Machine) String() string {
// TODO(howell): cleanup this manual marshaling
- return fmt.Sprintf("Machine {\n\tBmh:%s\n\tScheduleStatus:%s\n\tVMRole:%v\n}\n",
- m.BMH.ObjectMeta.Name, m.ScheduleStatus, m.VMRole)
+ return fmt.Sprintf("Machine {\n\tBmh:%s\n\tScheduleStatus:%s\n\tBMHRole:%v\n}\n",
+ m.BMH.ObjectMeta.Name, m.ScheduleStatus, m.BMHRole)
}
-func NewMachine(bmh metal3.BareMetalHost, nodeRole airshipv1.VMRole, schedState ScheduledState) (m *Machine, e error) {
+func NewMachine(bmh metal3.BareMetalHost, nodeRole airshipv1.BMHRole, schedState ScheduledState) (m *Machine, e error) {
// Add logic to check if required fields exist.
if bmh.Spec.NetworkData == nil {
return nil, &ErrorNetworkDataNotFound{BMH: bmh}
@@ -113,7 +115,7 @@ func NewMachine(bmh metal3.BareMetalHost, nodeRole airshipv1.VMRole, schedState
return &Machine{
BMH: bmh,
ScheduleStatus: schedState,
- VMRole: nodeRole,
+ BMHRole: nodeRole,
Data: &MachineData{
IPOnInterface: make(map[string]string),
},
@@ -131,10 +133,10 @@ type MachineData struct {
// MachineList contains the list of Scheduled or ToBeScheduled machines
type MachineList struct {
NamespacedName types.NamespacedName
- // ViNO Machines
+ // Machines, keyed by BMH name
Machines map[string]*Machine
// Keep track of how many we have marked for scheduling.
- ReadyForScheduleCount map[airshipv1.VMRole]int
+ ReadyForScheduleCount map[airshipv1.BMHRole]int
Log logr.Logger
}
@@ -160,13 +162,13 @@ func (ml *MachineList) Schedule(sip airshipv1.SIPCluster, c client.Client) error
// Initialize the Target list
ml.init(sip.Spec.Nodes)
- // IDentify vBMH's that meet the appropriate selction criteria
+ // Identify BMH's that meet the appropriate selection criteria
bmhList, err := ml.getBMHs(c)
if err != nil {
return err
}
- // Identify and Select the vBMH I actually will use
+ // Identify and select the BMHs that will actually be used
err = ml.identifyNodes(sip, bmhList, c)
if err != nil {
return err
@@ -179,7 +181,7 @@ func (ml *MachineList) Schedule(sip airshipv1.SIPCluster, c client.Client) error
return nil
}
-func (ml *MachineList) init(nodes map[airshipv1.VMRole]airshipv1.NodeSet) {
+func (ml *MachineList) init(nodes map[airshipv1.BMHRole]airshipv1.NodeSet) {
// Only Initialize 1st time
if len(ml.Machines) == 0 {
mlSize := 0
@@ -189,7 +191,7 @@ func (ml *MachineList) init(nodes map[airshipv1.VMRole]airshipv1.NodeSet) {
mlNodeTypes++
}
fmt.Printf("Schedule.init mlSize:%d\n", mlSize)
- ml.ReadyForScheduleCount = make(map[airshipv1.VMRole]int, mlNodeTypes)
+ ml.ReadyForScheduleCount = make(map[airshipv1.BMHRole]int, mlNodeTypes)
ml.Machines = make(map[string]*Machine, 0)
}
}
@@ -216,12 +218,12 @@ func (ml *MachineList) getBMHs(c client.Client) (*metal3.BareMetalHostList, erro
if len(bmhList.Items) > 0 {
return bmhList, nil
}
- return bmhList, fmt.Errorf("Unable to identify vBMH available for scheduling. Selecting %v ", scheduleLabels)
+ return bmhList, fmt.Errorf("Unable to identify BMH available for scheduling. Selecting %v ", scheduleLabels)
}
func (ml *MachineList) identifyNodes(sip airshipv1.SIPCluster,
bmhList *metal3.BareMetalHostList, c client.Client) error {
- // If using the SIP Sheduled label, we now have a list of vBMH;'s
+ // If using the SIP Scheduled label, we now have a list of BMH's
// that are not scheduled
// Next I need to apply the constraints
@@ -248,7 +250,7 @@ func (ml *MachineList) identifyNodes(sip airshipv1.SIPCluster,
return nil
}
-func (ml *MachineList) initScheduleMaps(role airshipv1.VMRole,
+func (ml *MachineList) initScheduleMaps(role airshipv1.BMHRole,
constraint airshipv1.SpreadTopology) (*ScheduleSet, error) {
logger := ml.Log.WithValues("role", role, "spread topology", constraint)
var labelName string
@@ -270,7 +272,7 @@ func (ml *MachineList) initScheduleMaps(role airshipv1.VMRole,
}, nil
}
-func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VMRole,
+func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.BMHRole,
c client.Client, namespace string) int {
bmhList := &metal3.BareMetalHostList{}
@@ -310,11 +312,11 @@ func (ml *MachineList) countScheduledAndTobeScheduled(nodeRole airshipv1.VMRole,
// ReadyForScheduleCount should include:
// - Newly added in previous iterations tagged as ToBeScheduled
// - Count for those Added previously but now tagged as UnableToSchedule
- // - New vBMH Machines already tagged as as Scheduled
+ // - New BMH Machines already tagged as Scheduled
return ml.ReadyForScheduleCount[nodeRole]
}
-func (ml *MachineList) scheduleIt(nodeRole airshipv1.VMRole, nodeCfg airshipv1.NodeSet,
+func (ml *MachineList) scheduleIt(nodeRole airshipv1.BMHRole, nodeCfg airshipv1.NodeSet,
bmList *metal3.BareMetalHostList, scheduleSet *ScheduleSet,
c client.Client, namespace string) error {
logger := ml.Log.WithValues("role", nodeRole)
@@ -343,13 +345,16 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VMRole, nodeCfg airshipv1.N
if scheduleSet.Active() {
logger.Info("constraint is active")
// Check if bmh has the label
- bmhConstraintCondition, flavorMatch := scheduleSet.GetLabels(bmh.Labels, nodeCfg.VMFlavor)
- logger.Info("Checked BMH constraint condition and flavor match",
+ bmhConstraintCondition, match, err := scheduleSet.GetLabels(labels.Set(bmh.Labels), &nodeCfg.LabelSelector)
+ if err != nil {
+ return err
+ }
+ logger.Info("Checked BMH constraint condition and label selector",
"constraint condition", bmhConstraintCondition,
- "flavor match", flavorMatch)
- validBmh = flavorMatch
+ "label selector match", match)
+ validBmh = match
// If it does match the label selector
- if bmhConstraintCondition != "" && flavorMatch {
+ if bmhConstraintCondition != "" && match {
// If it's already in the list for the constraint, then this BMH is disqualified. Skip it
if scheduleSet.Exists(bmhConstraintCondition) {
logger.Info("Constraint slot is alrady taken some BMH from this constraint is already allocated, skipping it")
@@ -389,8 +394,8 @@ func (ml *MachineList) scheduleIt(nodeRole airshipv1.VMRole, nodeCfg airshipv1.N
if nodeTarget > 0 {
logger.Info("Failed to get enough BMHs to complete scheduling")
return ErrorUnableToFullySchedule{
- TargetNode: nodeRole,
- TargetFlavor: nodeCfg.VMFlavor,
+ TargetNode: nodeRole,
+ TargetLabelSelector: nodeCfg.LabelSelector,
}
}
return nil
@@ -421,7 +426,7 @@ func (ml *MachineList) ExtrapolateServiceAddresses(sip airshipv1.SIPCluster, c c
"Secret Namespace", machine.BMH.Spec.NetworkData.Namespace)
machine.ScheduleStatus = UnableToSchedule
- ml.ReadyForScheduleCount[machine.VMRole]--
+ ml.ReadyForScheduleCount[machine.BMHRole]--
extrapolateErrs = kerror.NewAggregate([]error{extrapolateErrs, err})
continue
@@ -435,7 +440,7 @@ func (ml *MachineList) ExtrapolateServiceAddresses(sip airshipv1.SIPCluster, c c
"Secret Namespace", machine.BMH.Spec.NetworkData.Namespace)
machine.ScheduleStatus = UnableToSchedule
- ml.ReadyForScheduleCount[machine.VMRole]--
+ ml.ReadyForScheduleCount[machine.BMHRole]--
extrapolateErrs = kerror.NewAggregate([]error{extrapolateErrs, err})
}
}
@@ -462,7 +467,7 @@ func (ml *MachineList) ExtrapolateBMCAuth(sip airshipv1.SIPCluster, c client.Cli
"Secret Namespace", machine.BMH.Namespace)
machine.ScheduleStatus = UnableToSchedule
- ml.ReadyForScheduleCount[machine.VMRole]--
+ ml.ReadyForScheduleCount[machine.BMHRole]--
extrapolateErrs = kerror.NewAggregate([]error{extrapolateErrs, err})
continue
@@ -476,7 +481,7 @@ func (ml *MachineList) ExtrapolateBMCAuth(sip airshipv1.SIPCluster, c client.Cli
"Secret Namespace", machine.BMH.Namespace)
machine.ScheduleStatus = UnableToSchedule
- ml.ReadyForScheduleCount[machine.VMRole]--
+ ml.ReadyForScheduleCount[machine.BMHRole]--
extrapolateErrs = kerror.NewAggregate([]error{extrapolateErrs, err})
}
}
@@ -717,22 +722,22 @@ func (ss *ScheduleSet) Exists(value string) bool {
func (ss *ScheduleSet) Add(labelValue string) {
ss.set[labelValue] = true
}
-func (ss *ScheduleSet) GetLabels(labels map[string]string, flavorLabel string) (string, bool) {
- fmt.Printf("Schedule.scheduleIt.GetLabels labels:%v, flavorLabel:%s\n", labels, flavorLabel)
+func (ss *ScheduleSet) GetLabels(labels labels.Labels, labelSelector *metav1.LabelSelector) (string, bool, error) {
+ fmt.Printf("Schedule.scheduleIt.GetLabels labels:%v, labelSelector:%s\n", labels, labelSelector)
+
+ match := false
if labels == nil {
- return "", false
+ return "", match, nil
}
- cl := strings.Split(flavorLabel, "=")
- if len(cl) > 0 {
- flavorLabelValue := cl[1]
- flavorLabelName := cl[0]
- return labels[ss.labelName], labels[flavorLabelName] == flavorLabelValue
+ selector, err := metav1.LabelSelectorAsSelector(labelSelector)
+ if err == nil {
+ match = selector.Matches(labels)
}
- return labels[ss.labelName], false
+ return labels.Get(ss.labelName), match, err
}
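Behavioral note on the rewrite above: strings.Split always returns at least one element, so the removed len(cl) > 0 guard never protected the cl[1] index, and a flavor string without "=" would panic. The new version delegates to metav1.LabelSelectorAsSelector, so a malformed selector surfaces as an error instead, and an empty selector now matches every BMH. A test-style sketch of the new contract (assuming a ScheduleSet ss whose labelName is the rack label):

    bmhLabels := labels.Set{
        "sip.airshipit.org/rack":    "r6",
        "vino.airshipit.org/flavor": "worker",
    }
    selector := &metav1.LabelSelector{
        MatchLabels: map[string]string{"vino.airshipit.org/flavor": "worker"},
    }
    rack, match, err := ss.GetLabels(bmhLabels, selector)
    // rack == "r6", match == true, err == nil
    _, _, _ = rack, match, err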
/*
-ApplyLabel : marks the appropriate machine labels to the vBMH's that
+ApplyLabels: marks the appropriate machine labels on the BMH's that
have been selected by the scheduling.
This is done only after the Infrastructure Services have been deployed
*/
@@ -745,7 +750,7 @@ func (ml *MachineList) ApplyLabels(sip airshipv1.SIPCluster, c client.Client) er
fmt.Printf("ApplyLabels bmh.ObjectMeta.Name:%s\n", bmh.ObjectMeta.Name)
bmh.Labels[SipClusterLabel] = sip.GetNamespace()
bmh.Labels[SipScheduleLabel] = "true"
- bmh.Labels[SipNodeTypeLabel] = string(machine.VMRole)
+ bmh.Labels[SipNodeTypeLabel] = string(machine.BMHRole)
// This is bombing when it finds 1 error
// Might be better to accumulate the errors, and
@@ -801,7 +806,7 @@ func (ml *MachineList) GetCluster(sip airshipv1.SIPCluster, c client.Client) err
ml.Machines[bmh.ObjectMeta.Name] = &Machine{
BMH: bmh,
ScheduleStatus: Scheduled,
- VMRole: airshipv1.VMRole(bmh.Labels[SipNodeTypeLabel]),
+ BMHRole: airshipv1.BMHRole(bmh.Labels[SipNodeTypeLabel]),
Data: &MachineData{
IPOnInterface: make(map[string]string),
},
diff --git a/pkg/vbmh/vbmh_suite_test.go b/pkg/bmh/bmh_suite_test.go
similarity index 90%
rename from pkg/vbmh/vbmh_suite_test.go
rename to pkg/bmh/bmh_suite_test.go
index 2238d34..51c8041 100644
--- a/pkg/vbmh/vbmh_suite_test.go
+++ b/pkg/bmh/bmh_suite_test.go
@@ -1,4 +1,4 @@
-package vbmh_test
+package bmh_test
import (
"testing"
diff --git a/pkg/vbmh/vbmh_test.go b/pkg/bmh/bmh_test.go
similarity index 88%
rename from pkg/vbmh/vbmh_test.go
rename to pkg/bmh/bmh_test.go
index af27f6e..16c1251 100644
--- a/pkg/vbmh/vbmh_test.go
+++ b/pkg/bmh/bmh_test.go
@@ -1,4 +1,4 @@
-package vbmh
+package bmh
import (
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
@@ -15,7 +15,7 @@ import (
)
const (
- // numNodes is the number of test vBMH objects (nodes) created for each test
+ // numNodes is the number of test BMH objects (nodes) created for each test
numNodes = 7
)
@@ -25,14 +25,14 @@ var _ = Describe("MachineList", func() {
BeforeEach(func() {
nodes := map[string]*Machine{}
for n := 0; n < numNodes; n++ {
- bmh, _ := testutil.CreateBMH(n, "default", airshipv1.VMControlPlane, 6)
- nodes[bmh.Name], err = NewMachine(*bmh, airshipv1.VMControlPlane, NotScheduled)
+ bmh, _ := testutil.CreateBMH(n, "default", airshipv1.RoleControlPlane, 6)
+ nodes[bmh.Name], err = NewMachine(*bmh, airshipv1.RoleControlPlane, NotScheduled)
Expect(err).To(BeNil())
}
machineList = &MachineList{
NamespacedName: types.NamespacedName{
- Name: "vbmh",
+ Name: "bmh",
Namespace: "default",
},
Machines: nodes,
@@ -95,7 +95,7 @@ var _ = Describe("MachineList", func() {
It("Should retrieve the BMH IP from the BMH's NetworkData secret when infra services are defined", func() {
// Create a BMH with a NetworkData secret
- bmh, networkData := testutil.CreateBMH(1, "default", airshipv1.VMControlPlane, 6)
+ bmh, networkData := testutil.CreateBMH(1, "default", airshipv1.RoleControlPlane, 6)
// Create BMH and NetworkData secret
var objsToApply []runtime.Object
@@ -110,12 +110,12 @@ var _ = Describe("MachineList", func() {
bmh.Spec.BMC.CredentialsName = bmcSecret.Name
objsToApply = append(objsToApply, bmcSecret)
- m, err := NewMachine(*bmh, airshipv1.VMControlPlane, NotScheduled)
+ m, err := NewMachine(*bmh, airshipv1.RoleControlPlane, NotScheduled)
Expect(err).To(BeNil())
ml := &MachineList{
NamespacedName: types.NamespacedName{
- Name: "vbmh",
+ Name: "bmh",
Namespace: "default",
},
Machines: map[string]*Machine{
@@ -146,7 +146,7 @@ var _ = Describe("MachineList", func() {
It("Should not retrieve the BMH IP from the BMH's NetworkData secret if no infraServices are defined", func() {
// Create a BMH with a NetworkData secret
- bmh, networkData := testutil.CreateBMH(1, "default", airshipv1.VMControlPlane, 6)
+ bmh, networkData := testutil.CreateBMH(1, "default", airshipv1.RoleControlPlane, 6)
// Create BMH and NetworkData secret
var objsToApply []runtime.Object
@@ -161,12 +161,12 @@ var _ = Describe("MachineList", func() {
bmh.Spec.BMC.CredentialsName = bmcSecret.Name
objsToApply = append(objsToApply, bmcSecret)
- m, err := NewMachine(*bmh, airshipv1.VMControlPlane, NotScheduled)
+ m, err := NewMachine(*bmh, airshipv1.RoleControlPlane, NotScheduled)
Expect(err).To(BeNil())
ml := &MachineList{
NamespacedName: types.NamespacedName{
- Name: "vbmh",
+ Name: "bmh",
Namespace: "default",
},
Machines: map[string]*Machine{
@@ -206,20 +206,20 @@ var _ = Describe("MachineList", func() {
bmh.Spec.BMC.CredentialsName = "foo-does-not-exist"
- m, err := NewMachine(*bmh, airshipv1.VMControlPlane, NotScheduled)
+ m, err := NewMachine(*bmh, airshipv1.RoleControlPlane, NotScheduled)
Expect(err).To(BeNil())
ml := &MachineList{
NamespacedName: types.NamespacedName{
- Name: "vbmh",
+ Name: "bmh",
Namespace: "default",
},
Machines: map[string]*Machine{
bmh.Name: m,
},
- ReadyForScheduleCount: map[airshipv1.VMRole]int{
- airshipv1.VMControlPlane: 1,
- airshipv1.VMWorker: 0,
+ ReadyForScheduleCount: map[airshipv1.BMHRole]int{
+ airshipv1.RoleControlPlane: 1,
+ airshipv1.RoleWorker: 0,
},
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}
@@ -259,20 +259,20 @@ var _ = Describe("MachineList", func() {
bmh.Spec.BMC.CredentialsName = bmcSecret.Name
objsToApply = append(objsToApply, bmcSecret)
- m, err := NewMachine(*bmh, airshipv1.VMControlPlane, NotScheduled)
+ m, err := NewMachine(*bmh, airshipv1.RoleControlPlane, NotScheduled)
Expect(err).To(BeNil())
ml := &MachineList{
NamespacedName: types.NamespacedName{
- Name: "vbmh",
+ Name: "bmh",
Namespace: "default",
},
Machines: map[string]*Machine{
bmh.Name: m,
},
- ReadyForScheduleCount: map[airshipv1.VMRole]int{
- airshipv1.VMControlPlane: 1,
- airshipv1.VMWorker: 0,
+ ReadyForScheduleCount: map[airshipv1.BMHRole]int{
+ airshipv1.RoleControlPlane: 1,
+ airshipv1.RoleWorker: 0,
},
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}
@@ -306,20 +306,20 @@ var _ = Describe("MachineList", func() {
bmh.Spec.NetworkData.Name = "foo-does-not-exist"
bmh.Spec.NetworkData.Namespace = "foo-does-not-exist"
- m, err := NewMachine(*bmh, airshipv1.VMControlPlane, NotScheduled)
+ m, err := NewMachine(*bmh, airshipv1.RoleControlPlane, NotScheduled)
Expect(err).To(BeNil())
ml := &MachineList{
NamespacedName: types.NamespacedName{
- Name: "vbmh",
+ Name: "bmh",
Namespace: "default",
},
Machines: map[string]*Machine{
bmh.Name: m,
},
- ReadyForScheduleCount: map[airshipv1.VMRole]int{
- airshipv1.VMControlPlane: 1,
- airshipv1.VMWorker: 0,
+ ReadyForScheduleCount: map[airshipv1.BMHRole]int{
+ airshipv1.RoleControlPlane: 1,
+ airshipv1.RoleWorker: 0,
},
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}
@@ -352,20 +352,20 @@ var _ = Describe("MachineList", func() {
networkData.Data = map[string][]byte{"foo": []byte("bad data!")}
- m, err := NewMachine(*bmh, airshipv1.VMControlPlane, NotScheduled)
+ m, err := NewMachine(*bmh, airshipv1.RoleControlPlane, NotScheduled)
Expect(err).To(BeNil())
ml := &MachineList{
NamespacedName: types.NamespacedName{
- Name: "vbmh",
+ Name: "bmh",
Namespace: "default",
},
Machines: map[string]*Machine{
bmh.Name: m,
},
- ReadyForScheduleCount: map[airshipv1.VMRole]int{
- airshipv1.VMControlPlane: 1,
- airshipv1.VMWorker: 0,
+ ReadyForScheduleCount: map[airshipv1.BMHRole]int{
+ airshipv1.RoleControlPlane: 1,
+ airshipv1.RoleWorker: 0,
},
Log: ctrl.Log.WithName("controllers").WithName("SIPCluster"),
}
@@ -406,9 +406,9 @@ var _ = Describe("MachineList", func() {
It("Should not schedule BMH if it is missing networkdata", func() {
// Create a BMH without NetworkData
- bmh, _ := testutil.CreateBMH(1, "default", airshipv1.VMControlPlane, 6)
+ bmh, _ := testutil.CreateBMH(1, "default", airshipv1.RoleControlPlane, 6)
bmh.Spec.NetworkData = nil
- _, err := NewMachine(*bmh, airshipv1.VMControlPlane, NotScheduled)
+ _, err := NewMachine(*bmh, airshipv1.RoleControlPlane, NotScheduled)
Expect(err).ToNot(BeNil())
})
})
diff --git a/pkg/vbmh/errors.go b/pkg/bmh/errors.go
similarity index 89%
rename from pkg/vbmh/errors.go
rename to pkg/bmh/errors.go
index 69f8006..fa34e08 100644
--- a/pkg/vbmh/errors.go
+++ b/pkg/bmh/errors.go
@@ -1,9 +1,10 @@
-package vbmh
+package bmh
import (
"fmt"
metal3 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
airshipv1 "sipcluster/pkg/api/v1"
)
@@ -17,13 +18,13 @@ func (e ErrorConstraintNotFound) Error() string {
}
type ErrorUnableToFullySchedule struct {
- TargetNode airshipv1.VMRole
- TargetFlavor string
+ TargetNode airshipv1.BMHRole
+ TargetLabelSelector metav1.LabelSelector
}
func (e ErrorUnableToFullySchedule) Error() string {
- return fmt.Sprintf("Unable to complete a schedule with a target of %v nodes, with a flavor of %v",
- e.TargetNode, e.TargetFlavor)
+ return fmt.Sprintf("Unable to complete a schedule with a target of %v nodes, with a label selector of %v",
+ e.TargetNode, e.TargetLabelSelector)
}
type ErrorHostIPNotFound struct {
diff --git a/pkg/controllers/sipcluster_controller.go b/pkg/controllers/sipcluster_controller.go
index faf70f3..18baad3 100644
--- a/pkg/controllers/sipcluster_controller.go
+++ b/pkg/controllers/sipcluster_controller.go
@@ -31,8 +31,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/predicate"
airshipv1 "sipcluster/pkg/api/v1"
+ bmh "sipcluster/pkg/bmh"
airshipsvc "sipcluster/pkg/services"
- airshipvms "sipcluster/pkg/vbmh"
)
// SIPClusterReconciler reconciles a SIPCluster object
@@ -124,7 +124,7 @@ func (r *SIPClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request)
log.Error(err, "unable to set condition", "condition", readyCondition)
}
- log.Error(err, "unable to gather vBMHs")
+ log.Error(err, "unable to gather BMHs")
return ctrl.Result{Requeue: true}, err
}
@@ -242,7 +242,7 @@ func removeString(slice []string, s string) []string {
/*
### Gather Phase
-#### Identity BMH VM's
+#### Identify BMH's
- Gather BMH's that meet the criteria expected for the groups
- Check for existing labeled BMH's
- Complete the expected scheduling constraints:
@@ -265,14 +265,14 @@ func removeString(slice []string, s string) []string {
// machines
func (r *SIPClusterReconciler) gatherVBMH(ctx context.Context, sip airshipv1.SIPCluster) (
- *airshipvms.MachineList, error) {
+ *bmh.MachineList, error) {
// 1- Let me retrieve all BMH that are unlabeled or already labeled with the target Tenant/CNF
// 2- Let me now select the one's that meet the scheduling criteria
// If I schedule successfully then
// If Not complete schedule , then throw an error.
logger := logr.FromContext(ctx)
logger.Info("starting to gather BaremetalHost machines for SIPcluster")
- machines := &airshipvms.MachineList{
+ machines := &bmh.MachineList{
Log: logger.WithName("machines"),
NamespacedName: r.NamespacedName,
}
@@ -307,7 +307,7 @@ func (r *SIPClusterReconciler) gatherVBMH(ctx context.Context, sip airshipv1.SIP
return machines, nil
}
-func (r *SIPClusterReconciler) deployInfra(sip airshipv1.SIPCluster, machines *airshipvms.MachineList,
+func (r *SIPClusterReconciler) deployInfra(sip airshipv1.SIPCluster, machines *bmh.MachineList,
logger logr.Logger) error {
newServiceSet := airshipsvc.NewServiceSet(logger, sip, machines, r.Client)
serviceList, err := newServiceSet.ServiceList()
@@ -326,18 +326,18 @@ func (r *SIPClusterReconciler) deployInfra(sip airshipv1.SIPCluster, machines *a
/*
finish should take care of any wrap-up tasks.
*/
-func (r *SIPClusterReconciler) finish(sip airshipv1.SIPCluster, machines *airshipvms.MachineList) error {
- // UnLabel the vBMH's
+func (r *SIPClusterReconciler) finish(sip airshipv1.SIPCluster, machines *bmh.MachineList) error {
+ // Label the scheduled BMH's
return machines.ApplyLabels(sip, r.Client)
}
/**
Deal with Deletion and Finalizers if any is needed
-Such as i'e what are we doing with the lables on the vBMH's
+Such as: what are we doing with the labels on the BMH's
**/
func (r *SIPClusterReconciler) finalize(ctx context.Context, sip airshipv1.SIPCluster) error {
logger := logr.FromContext(ctx)
- machines := &airshipvms.MachineList{}
+ machines := &bmh.MachineList{}
serviceSet := airshipsvc.NewServiceSet(logger, sip, machines, r.Client)
serviceList, err := serviceSet.ServiceList()
if err != nil {
@@ -354,7 +354,7 @@ func (r *SIPClusterReconciler) finalize(ctx context.Context, sip airshipv1.SIPCl
return err
}
- // 1- Let me retrieve all vBMH mapped for this SIP Cluster
+ // 1- Let me retrieve all BMH mapped for this SIP Cluster
// 2- Let me now select the one's that meet the scheduling criteria
// If I schedule successfully then
// If Not complete schedule , then throw an error.
diff --git a/pkg/controllers/sipcluster_controller_test.go b/pkg/controllers/sipcluster_controller_test.go
index c065c57..bcb90ca 100644
--- a/pkg/controllers/sipcluster_controller_test.go
+++ b/pkg/controllers/sipcluster_controller_test.go
@@ -30,7 +30,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
airshipv1 "sipcluster/pkg/api/v1"
- "sipcluster/pkg/vbmh"
+ bmhpkg "sipcluster/pkg/bmh"
"sipcluster/testutil"
)
@@ -51,20 +51,20 @@ var _ = Describe("SIPCluster controller", func() {
It("Should schedule available nodes", func() {
By("Labeling nodes")
- // Create vBMH test objects
- nodes := []airshipv1.VMRole{airshipv1.VMControlPlane, airshipv1.VMControlPlane, airshipv1.VMControlPlane,
- airshipv1.VMWorker, airshipv1.VMWorker, airshipv1.VMWorker, airshipv1.VMWorker}
+ // Create BMH test objects
+ nodes := []airshipv1.BMHRole{airshipv1.RoleControlPlane, airshipv1.RoleControlPlane, airshipv1.RoleControlPlane,
+ airshipv1.RoleWorker, airshipv1.RoleWorker, airshipv1.RoleWorker, airshipv1.RoleWorker}
bmcUsername := "root"
bmcPassword := "test"
for node, role := range nodes {
- vBMH, networkData := testutil.CreateBMH(node, testNamespace, role, 6)
- bmcSecret := testutil.CreateBMCAuthSecret(vBMH.Name, vBMH.Namespace, bmcUsername,
+ bmh, networkData := testutil.CreateBMH(node, testNamespace, role, 6)
+ bmcSecret := testutil.CreateBMCAuthSecret(bmh.Name, bmh.Namespace, bmcUsername,
bmcPassword)
- vBMH.Spec.BMC.CredentialsName = bmcSecret.Name
+ bmh.Spec.BMC.CredentialsName = bmcSecret.Name
Expect(k8sClient.Create(context.Background(), bmcSecret)).Should(Succeed())
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
}
@@ -78,8 +78,8 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs until SIP has scheduled them to the SIP cluster
Eventually(func() error {
expectedLabels := map[string]string{
- vbmh.SipScheduleLabel: "true",
- vbmh.SipClusterLabel: testNamespace,
+ bmhpkg.SipScheduleLabel: "true",
+ bmhpkg.SipClusterLabel: testNamespace,
}
var bmh metal3.BareMetalHost
@@ -97,12 +97,12 @@ var _ = Describe("SIPCluster controller", func() {
It("Should not schedule nodes when there is an insufficient number of available ControlPlane nodes", func() {
By("Not labeling any nodes")
- // Create vBMH test objects
- nodes := []airshipv1.VMRole{airshipv1.VMControlPlane, airshipv1.VMControlPlane, airshipv1.VMWorker,
- airshipv1.VMWorker, airshipv1.VMWorker, airshipv1.VMWorker}
+ // Create BMH test objects
+ nodes := []airshipv1.BMHRole{airshipv1.RoleControlPlane, airshipv1.RoleControlPlane, airshipv1.RoleWorker,
+ airshipv1.RoleWorker, airshipv1.RoleWorker, airshipv1.RoleWorker}
for node, role := range nodes {
- vBMH, networkData := testutil.CreateBMH(node, testNamespace, role, 6)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ bmh, networkData := testutil.CreateBMH(node, testNamespace, role, 6)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
}
@@ -115,7 +115,7 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs and validate they are not scheduled
Consistently(func() error {
expectedLabels := map[string]string{
- vbmh.SipScheduleLabel: "false",
+ bmhpkg.SipScheduleLabel: "false",
}
var bmh metal3.BareMetalHost
@@ -143,13 +143,13 @@ var _ = Describe("SIPCluster controller", func() {
It("Should not schedule nodes when there is an insufficient number of available Worker nodes", func() {
By("Not labeling any nodes")
- // Create vBMH test objects
- nodes := []airshipv1.VMRole{airshipv1.VMControlPlane, airshipv1.VMControlPlane, airshipv1.VMControlPlane,
- airshipv1.VMWorker, airshipv1.VMWorker}
+ // Create BMH test objects
+ nodes := []airshipv1.BMHRole{airshipv1.RoleControlPlane, airshipv1.RoleControlPlane, airshipv1.RoleControlPlane,
+ airshipv1.RoleWorker, airshipv1.RoleWorker}
testNamespace := "default"
for node, role := range nodes {
- vBMH, networkData := testutil.CreateBMH(node, testNamespace, role, 6)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ bmh, networkData := testutil.CreateBMH(node, testNamespace, role, 6)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
}
@@ -162,7 +162,7 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs and validate they are not scheduled
Consistently(func() error {
expectedLabels := map[string]string{
- vbmh.SipScheduleLabel: "false",
+ bmhpkg.SipScheduleLabel: "false",
}
var bmh metal3.BareMetalHost
@@ -191,29 +191,29 @@ var _ = Describe("SIPCluster controller", func() {
It("Should not schedule two Worker nodes to the same server", func() {
By("Not labeling any nodes")
- // Create vBMH test objects
+ // Create BMH test objects
var nodes []*metal3.BareMetalHost
baremetalServer := "r06o001"
- vBMH, networkData := testutil.CreateBMH(0, testNamespace, airshipv1.VMControlPlane, 6)
- vBMH.Labels[vbmh.ServerLabel] = baremetalServer
+ bmh, networkData := testutil.CreateBMH(0, testNamespace, airshipv1.RoleControlPlane, 6)
+ bmh.Labels[bmhpkg.ServerLabel] = baremetalServer
- nodes = append(nodes, vBMH)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ nodes = append(nodes, bmh)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
- vBMH, networkData = testutil.CreateBMH(1, testNamespace, airshipv1.VMWorker, 6)
- vBMH.Labels[vbmh.ServerLabel] = baremetalServer
+ bmh, networkData = testutil.CreateBMH(1, testNamespace, airshipv1.RoleWorker, 6)
+ bmh.Labels[bmhpkg.ServerLabel] = baremetalServer
- nodes = append(nodes, vBMH)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ nodes = append(nodes, bmh)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
- vBMH, networkData = testutil.CreateBMH(2, testNamespace, airshipv1.VMWorker, 6)
- vBMH.Labels[vbmh.ServerLabel] = baremetalServer
+ bmh, networkData = testutil.CreateBMH(2, testNamespace, airshipv1.RoleWorker, 6)
+ bmh.Labels[bmhpkg.ServerLabel] = baremetalServer
- nodes = append(nodes, vBMH)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ nodes = append(nodes, bmh)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
// Create SIP cluster
@@ -225,7 +225,7 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs and validate they are not scheduled
Consistently(func() error {
expectedLabels := map[string]string{
- vbmh.SipScheduleLabel: "false",
+ bmhpkg.SipScheduleLabel: "false",
}
var bmh metal3.BareMetalHost
@@ -253,29 +253,29 @@ var _ = Describe("SIPCluster controller", func() {
It("Should not schedule two ControlPlane nodes to the same server", func() {
By("Not labeling any nodes")
- // Create vBMH test objects
+ // Create BMH test objects
var nodes []*metal3.BareMetalHost
baremetalServer := "r06o001"
- vBMH, networkData := testutil.CreateBMH(0, testNamespace, airshipv1.VMControlPlane, 6)
- vBMH.Labels[vbmh.ServerLabel] = baremetalServer
+ bmh, networkData := testutil.CreateBMH(0, testNamespace, airshipv1.RoleControlPlane, 6)
+ bmh.Labels[bmhpkg.ServerLabel] = baremetalServer
- nodes = append(nodes, vBMH)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ nodes = append(nodes, bmh)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
- vBMH, networkData = testutil.CreateBMH(1, testNamespace, airshipv1.VMControlPlane, 6)
- vBMH.Labels[vbmh.ServerLabel] = baremetalServer
+ bmh, networkData = testutil.CreateBMH(1, testNamespace, airshipv1.RoleControlPlane, 6)
+ bmh.Labels[bmhpkg.ServerLabel] = baremetalServer
- nodes = append(nodes, vBMH)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ nodes = append(nodes, bmh)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
- vBMH, networkData = testutil.CreateBMH(2, testNamespace, airshipv1.VMWorker, 6)
- vBMH.Labels[vbmh.ServerLabel] = baremetalServer
+ bmh, networkData = testutil.CreateBMH(2, testNamespace, airshipv1.RoleWorker, 6)
+ bmh.Labels[bmhpkg.ServerLabel] = baremetalServer
- nodes = append(nodes, vBMH)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ nodes = append(nodes, bmh)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
// Create SIP cluster
@@ -287,7 +287,7 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs and validate they are not scheduled
Consistently(func() error {
expectedLabels := map[string]string{
- vbmh.SipScheduleLabel: "false",
+ bmhpkg.SipScheduleLabel: "false",
}
var bmh metal3.BareMetalHost
@@ -317,39 +317,39 @@ var _ = Describe("SIPCluster controller", func() {
It("Should not schedule two Worker nodes to the same rack", func() {
By("Not labeling any nodes")
- // Create vBMH test objects
+ // Create BMH test objects
var nodes []*metal3.BareMetalHost
testNamespace := "default"
- vBMH, networkData := testutil.CreateBMH(0, testNamespace, airshipv1.VMControlPlane, 6)
+ bmh, networkData := testutil.CreateBMH(0, testNamespace, airshipv1.RoleControlPlane, 6)
- nodes = append(nodes, vBMH)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ nodes = append(nodes, bmh)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
- vBMH, networkData = testutil.CreateBMH(1, testNamespace, airshipv1.VMWorker, 6)
+ bmh, networkData = testutil.CreateBMH(1, testNamespace, airshipv1.RoleWorker, 6)
- nodes = append(nodes, vBMH)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ nodes = append(nodes, bmh)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
- vBMH, networkData = testutil.CreateBMH(2, testNamespace, airshipv1.VMWorker, 6)
+ bmh, networkData = testutil.CreateBMH(2, testNamespace, airshipv1.RoleWorker, 6)
- nodes = append(nodes, vBMH)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ nodes = append(nodes, bmh)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
// Create SIP cluster
name := "subcluster-test3"
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster(name, testNamespace, 1, 2)
- controlPlaneSpec := sipCluster.Spec.Nodes[airshipv1.VMControlPlane]
+ controlPlaneSpec := sipCluster.Spec.Nodes[airshipv1.RoleControlPlane]
controlPlaneSpec.Scheduling = airshipv1.RackAntiAffinity
- sipCluster.Spec.Nodes[airshipv1.VMControlPlane] = controlPlaneSpec
+ sipCluster.Spec.Nodes[airshipv1.RoleControlPlane] = controlPlaneSpec
- workerSpec := sipCluster.Spec.Nodes[airshipv1.VMWorker]
+ workerSpec := sipCluster.Spec.Nodes[airshipv1.RoleWorker]
workerSpec.Scheduling = airshipv1.RackAntiAffinity
- sipCluster.Spec.Nodes[airshipv1.VMWorker] = workerSpec
+ sipCluster.Spec.Nodes[airshipv1.RoleWorker] = workerSpec
Expect(k8sClient.Create(context.Background(), nodeSSHPrivateKeys)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())
@@ -357,7 +357,7 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs and validate they are not scheduled
Consistently(func() error {
expectedLabels := map[string]string{
- vbmh.SipScheduleLabel: "false",
+ bmhpkg.SipScheduleLabel: "false",
}
var bmh metal3.BareMetalHost
@@ -385,38 +385,38 @@ var _ = Describe("SIPCluster controller", func() {
It("Should not schedule two ControlPlane nodes to the same rack", func() {
By("Not labeling any nodes")
- // Create vBMH test objects
+ // Create BMH test objects
var nodes []*metal3.BareMetalHost
- vBMH, networkData := testutil.CreateBMH(0, testNamespace, airshipv1.VMControlPlane, 6)
+ bmh, networkData := testutil.CreateBMH(0, testNamespace, airshipv1.RoleControlPlane, 6)
- nodes = append(nodes, vBMH)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ nodes = append(nodes, bmh)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
- vBMH, networkData = testutil.CreateBMH(1, testNamespace, airshipv1.VMControlPlane, 6)
+ bmh, networkData = testutil.CreateBMH(1, testNamespace, airshipv1.RoleControlPlane, 6)
- nodes = append(nodes, vBMH)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ nodes = append(nodes, bmh)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
- vBMH, networkData = testutil.CreateBMH(2, testNamespace, airshipv1.VMWorker, 6)
+ bmh, networkData = testutil.CreateBMH(2, testNamespace, airshipv1.RoleWorker, 6)
- nodes = append(nodes, vBMH)
- Expect(k8sClient.Create(context.Background(), vBMH)).Should(Succeed())
+ nodes = append(nodes, bmh)
+ Expect(k8sClient.Create(context.Background(), bmh)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), networkData)).Should(Succeed())
// Create SIP cluster
name := "subcluster-test3"
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster(name, testNamespace, 2, 1)
- controlPlaneSpec := sipCluster.Spec.Nodes[airshipv1.VMControlPlane]
+ controlPlaneSpec := sipCluster.Spec.Nodes[airshipv1.RoleControlPlane]
controlPlaneSpec.Scheduling = airshipv1.RackAntiAffinity
- sipCluster.Spec.Nodes[airshipv1.VMControlPlane] = controlPlaneSpec
+ sipCluster.Spec.Nodes[airshipv1.RoleControlPlane] = controlPlaneSpec
- workerSpec := sipCluster.Spec.Nodes[airshipv1.VMWorker]
+ workerSpec := sipCluster.Spec.Nodes[airshipv1.RoleWorker]
workerSpec.Scheduling = airshipv1.RackAntiAffinity
- sipCluster.Spec.Nodes[airshipv1.VMWorker] = workerSpec
+ sipCluster.Spec.Nodes[airshipv1.RoleWorker] = workerSpec
Expect(k8sClient.Create(context.Background(), nodeSSHPrivateKeys)).Should(Succeed())
Expect(k8sClient.Create(context.Background(), sipCluster)).Should(Succeed())
@@ -424,7 +424,7 @@ var _ = Describe("SIPCluster controller", func() {
// Poll BMHs and validate they are not scheduled
Consistently(func() error {
expectedLabels := map[string]string{
- vbmh.SipScheduleLabel: "false",
+ bmhpkg.SipScheduleLabel: "false",
}
var bmh metal3.BareMetalHost
diff --git a/pkg/services/jumphost.go b/pkg/services/jumphost.go
index a39c0e5..3607bc6 100644
--- a/pkg/services/jumphost.go
+++ b/pkg/services/jumphost.go
@@ -32,7 +32,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
airshipv1 "sipcluster/pkg/api/v1"
- airshipvms "sipcluster/pkg/vbmh"
+ bmh "sipcluster/pkg/bmh"
)
const (
@@ -63,11 +63,11 @@ type jumpHost struct {
sipName types.NamespacedName
logger logr.Logger
config airshipv1.JumpHostService
- machines *airshipvms.MachineList
+ machines *bmh.MachineList
}
func newJumpHost(name, namespace string, logger logr.Logger, config airshipv1.JumpHostService,
- machines *airshipvms.MachineList, client client.Client) InfraService {
+ machines *bmh.MachineList, client client.Client) InfraService {
return jumpHost{
sipName: types.NamespacedName{
Name: name,
@@ -305,7 +305,7 @@ func (jh jumpHost) generateConfigMap(instance string, labels map[string]string)
},
Data: map[string]string{
nameAuthorizedKeysVolume: strings.Join(jh.config.SSHAuthorizedKeys, "\n"),
- "vm": fmt.Sprintf(rebootScript, mountPathHosts),
+ "host": fmt.Sprintf(rebootScript, mountPathHosts),
},
}, nil
}
@@ -453,7 +453,7 @@ type bmc struct {
// generateHostList creates a list of hosts in JSON format to be mounted as a config map to the jump host pod and used
// to power cycle sub-cluster nodes.
-func generateHostList(machineList airshipvms.MachineList) ([]byte, error) {
+func generateHostList(machineList bmh.MachineList) ([]byte, error) {
hosts := make([]host, len(machineList.Machines))
for name, machine := range machineList.Machines {
managementIP, err := getManagementIP(machine.BMH.Spec.BMC.Address)
@@ -494,7 +494,7 @@ func getManagementIP(redfishURL string) (string, error) {
var rebootScript = `#!/bin/sh
-# Support Infrastructure Provider (SIP) VM Utility
+# Support Infrastructure Provider (SIP) Host Utility
# DO NOT MODIFY: generated by SIP
HOSTS_FILE="%s"
@@ -503,7 +503,7 @@ LIST_COMMAND="list"
REBOOT_COMMAND="reboot"
help() {
- echo "Support Infrastructure Provider (SIP) VM Utility"
+ echo "Support Infrastructure Provider (SIP) Host Utility"
echo ""
echo "Usage: ${LIST_COMMAND} list hosts"
echo " ${REBOOT_COMMAND} [host name] reboot host"
diff --git a/pkg/services/loadbalancer.go b/pkg/services/loadbalancer.go
index 35add2c..da15139 100644
--- a/pkg/services/loadbalancer.go
+++ b/pkg/services/loadbalancer.go
@@ -19,7 +19,7 @@ import (
"html/template"
airshipv1 "sipcluster/pkg/api/v1"
- airshipvms "sipcluster/pkg/vbmh"
+ bmh "sipcluster/pkg/bmh"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/types"
@@ -149,7 +149,7 @@ func (lb loadBalancer) generateSecret(instance string) (*corev1.Secret, error) {
Backends: make([]backend, 0),
}
for _, machine := range lb.machines.Machines {
- if machine.VMRole == airshipv1.VMControlPlane {
+ if machine.BMHRole == airshipv1.RoleControlPlane {
name := machine.BMH.Name
namespace := machine.BMH.Namespace
ip, exists := machine.Data.IPOnInterface[lb.config.NodeInterface]
@@ -215,13 +215,13 @@ type loadBalancer struct {
sipName types.NamespacedName
logger logr.Logger
config airshipv1.SIPClusterService
- machines *airshipvms.MachineList
+ machines *bmh.MachineList
}
func newLB(name, namespace string,
logger logr.Logger,
config airshipv1.SIPClusterService,
- machines *airshipvms.MachineList,
+ machines *bmh.MachineList,
client client.Client) loadBalancer {
return loadBalancer{
sipName: types.NamespacedName{
diff --git a/pkg/services/services_test.go b/pkg/services/services_test.go
index 2df11ac..dd26a12 100644
--- a/pkg/services/services_test.go
+++ b/pkg/services/services_test.go
@@ -14,8 +14,8 @@ import (
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sipcluster/pkg/bmh"
"sipcluster/pkg/services"
- "sipcluster/pkg/vbmh"
"sipcluster/testutil"
)
@@ -27,8 +27,8 @@ const (
var bmh1 *metal3.BareMetalHost
var bmh2 *metal3.BareMetalHost
-var m1 *vbmh.Machine
-var m2 *vbmh.Machine
+var m1 *bmh.Machine
+var m2 *bmh.Machine
// Re-declared from services package for testing purposes
type host struct {
@@ -43,7 +43,7 @@ type bmc struct {
}
var _ = Describe("Service Set", func() {
- var machineList *vbmh.MachineList
+ var machineList *bmh.MachineList
BeforeEach(func() {
bmh1, _ = testutil.CreateBMH(1, "default", "control-plane", 1)
bmh2, _ = testutil.CreateBMH(2, "default", "control-plane", 2)
@@ -57,26 +57,26 @@ var _ = Describe("Service Set", func() {
bmh1.Spec.BMC.CredentialsName = bmcSecret.Name
bmh2.Spec.BMC.CredentialsName = bmcSecret.Name
- m1 = &vbmh.Machine{
+ m1 = &bmh.Machine{
BMH: *bmh1,
- Data: &vbmh.MachineData{
+ Data: &bmh.MachineData{
IPOnInterface: map[string]string{
"eno3": ip1,
},
},
}
- m2 = &vbmh.Machine{
+ m2 = &bmh.Machine{
BMH: *bmh2,
- Data: &vbmh.MachineData{
+ Data: &bmh.MachineData{
IPOnInterface: map[string]string{
"eno3": ip2,
},
},
}
- machineList = &vbmh.MachineList{
- Machines: map[string]*vbmh.Machine{
+ machineList = &bmh.MachineList{
+ Machines: map[string]*bmh.Machine{
bmh1.GetName(): m1,
bmh2.GetName(): m2,
},
@@ -96,8 +96,8 @@ var _ = Describe("Service Set", func() {
sipCluster, nodeSSHPrivateKeys := testutil.CreateSIPCluster("default", "default", 1, 1)
Expect(k8sClient.Create(context.Background(), nodeSSHPrivateKeys)).Should(Succeed())
- machineList = &vbmh.MachineList{
- Machines: map[string]*vbmh.Machine{
+ machineList = &bmh.MachineList{
+ Machines: map[string]*bmh.Machine{
bmh1.GetName(): m1,
bmh2.GetName(): m2,
},
@@ -140,7 +140,7 @@ var _ = Describe("Service Set", func() {
})
-func testDeployment(sip *airshipv1.SIPCluster, machineList vbmh.MachineList) error {
+func testDeployment(sip *airshipv1.SIPCluster, machineList bmh.MachineList) error {
loadBalancerDeployment := &appsv1.Deployment{}
err := k8sClient.Get(context.Background(), types.NamespacedName{
Namespace: "default",
diff --git a/pkg/services/set.go b/pkg/services/set.go
index 2f9a5ed..92759da 100644
--- a/pkg/services/set.go
+++ b/pkg/services/set.go
@@ -23,7 +23,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
airshipv1 "sipcluster/pkg/api/v1"
- airshipvms "sipcluster/pkg/vbmh"
+ bmh "sipcluster/pkg/bmh"
)
// InfraService generalizes infrastructure services
@@ -36,7 +36,7 @@ type InfraService interface {
type ServiceSet struct {
logger logr.Logger
sip airshipv1.SIPCluster
- machines *airshipvms.MachineList
+ machines *bmh.MachineList
client client.Client
}
@@ -44,7 +44,7 @@ type ServiceSet struct {
func NewServiceSet(
logger logr.Logger,
sip airshipv1.SIPCluster,
- machines *airshipvms.MachineList,
+ machines *bmh.MachineList,
client client.Client) ServiceSet {
logger = logger.WithValues("SIPCluster", types.NamespacedName{Name: sip.GetNamespace(), Namespace: sip.GetName()})
diff --git a/testutil/testutil.go b/testutil/testutil.go
index 5559aa2..4e1a19a 100644
--- a/testutil/testutil.go
+++ b/testutil/testutil.go
@@ -10,18 +10,18 @@ import (
airshipv1 "sipcluster/pkg/api/v1"
)
-var vinoFlavorMap = map[airshipv1.VMRole]string{
- airshipv1.VMControlPlane: "control-plane",
- airshipv1.VMWorker: "worker",
+var bmhRoleToLabelValue = map[airshipv1.BMHRole]string{
+ airshipv1.RoleControlPlane: "control-plane",
+ airshipv1.RoleWorker: "worker",
}
-// NOTE(aw442m): These constants have been redefined from the vbmh package in order to avoid an import cycle.
+// NOTE(aw442m): These constants have been redefined from the bmh package in order to avoid an import cycle.
const (
sipRackLabel = "sip.airshipit.org/rack"
sipScheduleLabel = "sip.airshipit.org/scheduled"
sipServerLabel = "sip.airshipit.org/server"
- VinoFlavorLabel = "vino.airshipit.org/flavor"
+ bmhLabel = "example.org/bmh-label"
sshPrivateKeyBase64 = "DUMMY_DATA"
@@ -173,7 +173,7 @@ const (
)
// CreateBMH initializes a BaremetalHost with specific parameters for use in test cases.
-func CreateBMH(node int, namespace string, role airshipv1.VMRole, rack int) (*metal3.BareMetalHost, *corev1.Secret) {
+func CreateBMH(node int, namespace string, role airshipv1.BMHRole, rack int) (*metal3.BareMetalHost, *corev1.Secret) {
rackLabel := fmt.Sprintf("r%d", rack)
networkDataName := fmt.Sprintf("node%d-network-data", node)
return &metal3.BareMetalHost{
@@ -181,10 +181,10 @@ func CreateBMH(node int, namespace string, role airshipv1.VMRole, rack int) (*me
Name: fmt.Sprintf("node0%d", node),
Namespace: namespace,
Labels: map[string]string{
- "vino.airshipit.org/flavor": vinoFlavorMap[role],
- sipScheduleLabel: "false",
- sipRackLabel: rackLabel,
- sipServerLabel: fmt.Sprintf("stl2%so%d", rackLabel, node),
+ bmhLabel: bmhRoleToLabelValue[role],
+ sipScheduleLabel: "false",
+ sipRackLabel: rackLabel,
+ sipServerLabel: fmt.Sprintf("stl2%so%d", rackLabel, node),
},
},
Spec: metal3.BareMetalHostSpec{
@@ -222,19 +222,27 @@ func CreateSIPCluster(name string, namespace string, controlPlanes int, workers
Namespace: namespace,
},
Spec: airshipv1.SIPClusterSpec{
- Nodes: map[airshipv1.VMRole]airshipv1.NodeSet{
- airshipv1.VMControlPlane: {
- VMFlavor: "vino.airshipit.org/flavor=" + vinoFlavorMap[airshipv1.VMControlPlane],
+ Nodes: map[airshipv1.BMHRole]airshipv1.NodeSet{
+ airshipv1.RoleControlPlane: {
+ LabelSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ bmhLabel: bmhRoleToLabelValue[airshipv1.RoleControlPlane],
+ },
+ },
Scheduling: airshipv1.HostAntiAffinity,
- Count: &airshipv1.VMCount{
+ Count: &airshipv1.NodeCount{
Active: controlPlanes,
Standby: 0,
},
},
- airshipv1.VMWorker: {
- VMFlavor: "vino.airshipit.org/flavor=" + vinoFlavorMap[airshipv1.VMWorker],
+ airshipv1.RoleWorker: {
+ LabelSelector: metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ bmhLabel: bmhRoleToLabelValue[airshipv1.RoleWorker],
+ },
+ },
Scheduling: airshipv1.HostAntiAffinity,
- Count: &airshipv1.VMCount{
+ Count: &airshipv1.NodeCount{
Active: workers,
Standby: 0,
},