Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 10 additions & 7 deletions test/extended/machine_config/pinnedimages.go
Original file line number Diff line number Diff line change
Expand Up @@ -353,24 +353,27 @@ func applyPIS(oc *exutil.CLI, pisFixture string, pis *mcfgv1.PinnedImageSet, pis
// `addWorkerNodesToCustomPool` labels the desired number of worker nodes with the MCP role
// selector so that the nodes become part of the desired custom MCP
func addWorkerNodesToCustomPool(oc *exutil.CLI, kubeClient *kubernetes.Clientset, numberOfNodes int, customMCP string) ([]string, error) {
// Get the worker nodes
nodes, err := kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{"node-role.kubernetes.io/worker": ""}).String()})
// Get ready schedulable worker nodes (excludes nodes with NoSchedule/NoExecute taints and control-plane nodes)
nodes, err := exutil.GetReadySchedulableWorkerNodes(context.TODO(), kubeClient)
if err != nil {
return nil, err
}
// Return an error if there are less worker nodes in the cluster than the desired number of nodes to add to the custom MCP
if len(nodes.Items) < numberOfNodes {
return nil, fmt.Errorf("Node in Worker MCP %d < Number of nodes needed in %d MCP", len(nodes.Items), numberOfNodes)

// Skip test gracefully if there are not enough schedulable worker nodes
// This handles SNO and compact cluster scenarios where nodes may have taints or dual roles
if len(nodes) < numberOfNodes {
g.Skip(fmt.Sprintf("Insufficient schedulable worker nodes: have %d, need %d (nodes may have taints or control-plane role)", len(nodes), numberOfNodes))
return nil, nil
}

// Label the nodes with the custom MCP role selector
var optedNodes []string
for node_i := 0; node_i < numberOfNodes; node_i++ {
err = oc.AsAdmin().Run("label").Args("node", nodes.Items[node_i].Name, fmt.Sprintf("node-role.kubernetes.io/%s=", customMCP)).Execute()
err = oc.AsAdmin().Run("label").Args("node", nodes[node_i].Name, fmt.Sprintf("node-role.kubernetes.io/%s=", customMCP)).Execute()
if err != nil {
return nil, err
}
optedNodes = append(optedNodes, nodes.Items[node_i].Name)
optedNodes = append(optedNodes, nodes[node_i].Name)
}
return optedNodes, nil
}
Expand Down
53 changes: 53 additions & 0 deletions test/extended/util/nodes.go
Original file line number Diff line number Diff line change
Expand Up @@ -193,3 +193,56 @@ func createNodeDisruptionPod(kubeClient kubernetes.Interface, nodeName string, a
}
return err
}

// GetReadySchedulableWorkerNodes returns ready schedulable worker nodes.
// This function filters out cordoned nodes, nodes with NoSchedule/NoExecute
// taints, and nodes with control-plane/master roles, making it suitable for
// tests that need to select pure worker nodes for workload placement (like
// MachineConfigPool assignments).
func GetReadySchedulableWorkerNodes(ctx context.Context, client kubernetes.Interface) ([]corev1.Node, error) {
	// Filter to worker-labeled nodes server-side with an "exists" selector;
	// this avoids transferring nodes that could never qualify and replaces the
	// client-side worker-label check.
	workerNodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{
		LabelSelector: "node-role.kubernetes.io/worker",
	})
	if err != nil {
		return nil, err
	}

	var schedulableWorkerNodes []corev1.Node
	for _, node := range workerNodes.Items {
		// Skip cordoned nodes. `kubectl cordon` sets Spec.Unschedulable first
		// and the matching NoSchedule taint is applied asynchronously, so the
		// field must be checked explicitly.
		if node.Spec.Unschedulable {
			continue
		}

		// Skip if node is not ready.
		ready := false
		for _, condition := range node.Status.Conditions {
			if condition.Type == corev1.NodeReady && condition.Status == corev1.ConditionTrue {
				ready = true
				break
			}
		}
		if !ready {
			continue
		}

		// Skip if node has NoSchedule or NoExecute taints.
		hasUnschedulableTaint := false
		for _, taint := range node.Spec.Taints {
			if taint.Effect == corev1.TaintEffectNoSchedule || taint.Effect == corev1.TaintEffectNoExecute {
				hasUnschedulableTaint = true
				break
			}
		}
		if hasUnschedulableTaint {
			continue
		}

		// Skip if node has control-plane or master role (we want pure worker
		// nodes; compact/SNO clusters have dual-role nodes).
		_, hasControlPlane := node.Labels["node-role.kubernetes.io/control-plane"]
		_, hasMaster := node.Labels["node-role.kubernetes.io/master"]
		if hasControlPlane || hasMaster {
			continue
		}

		schedulableWorkerNodes = append(schedulableWorkerNodes, node)
	}

	return schedulableWorkerNodes, nil
}