diff --git a/test/extended/machine_config/pinnedimages.go b/test/extended/machine_config/pinnedimages.go
index 08ae7c1e4adb..99b6d9e5eb33 100644
--- a/test/extended/machine_config/pinnedimages.go
+++ b/test/extended/machine_config/pinnedimages.go
@@ -352,24 +352,27 @@ func applyPIS(oc *exutil.CLI, pisFixture string, pis *mcfgv1.PinnedImageSet, pis
 // `addWorkerNodesToCustomPool` labels the desired number of worker nodes with the MCP role
 // selector so that the nodes become part of the desired custom MCP
 func addWorkerNodesToCustomPool(oc *exutil.CLI, kubeClient *kubernetes.Clientset, numberOfNodes int, customMCP string) ([]string, error) {
-	// Get the worker nodes
-	nodes, err := kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{"node-role.kubernetes.io/worker": ""}).String()})
+	// Get ready schedulable worker nodes (excludes nodes with NoSchedule/NoExecute taints and control-plane nodes)
+	nodes, err := exutil.GetReadySchedulableWorkerNodes(context.TODO(), kubeClient)
 	if err != nil {
 		return nil, err
 	}
-	// Return an error if there are less worker nodes in the cluster than the desired number of nodes to add to the custom MCP
-	if len(nodes.Items) < numberOfNodes {
-		return nil, fmt.Errorf("Node in Worker MCP %d < Number of nodes needed in %d MCP", len(nodes.Items), numberOfNodes)
+
+	// Skip test gracefully if there are not enough schedulable worker nodes
+	// This handles SNO and compact cluster scenarios where nodes may have taints or dual roles
+	if len(nodes) < numberOfNodes {
+		g.Skip(fmt.Sprintf("Insufficient schedulable worker nodes: have %d, need %d (nodes may have taints or control-plane role)", len(nodes), numberOfNodes))
+		return nil, nil
 	}
 
 	// Label the nodes with the custom MCP role selector
 	var optedNodes []string
 	for node_i := 0; node_i < numberOfNodes; node_i++ {
-		err = oc.AsAdmin().Run("label").Args("node", nodes.Items[node_i].Name, fmt.Sprintf("node-role.kubernetes.io/%s=", customMCP)).Execute()
+		err = oc.AsAdmin().Run("label").Args("node", nodes[node_i].Name, fmt.Sprintf("node-role.kubernetes.io/%s=", customMCP)).Execute()
 		if err != nil {
 			return nil, err
 		}
-		optedNodes = append(optedNodes, nodes.Items[node_i].Name)
+		optedNodes = append(optedNodes, nodes[node_i].Name)
 	}
 	return optedNodes, nil
 }
diff --git a/test/extended/util/nodes.go b/test/extended/util/nodes.go
index df48b7a82362..fe83c8227566 100644
--- a/test/extended/util/nodes.go
+++ b/test/extended/util/nodes.go
@@ -158,3 +158,56 @@ func triggerNodeReboot(kubeClient kubernetes.Interface, nodeName string, attempt
 	}
 	return err
 }
+
+// GetReadySchedulableWorkerNodes returns ready schedulable worker nodes.
+// This function filters out nodes with NoSchedule/NoExecute taints and nodes
+// with control-plane/master roles, making it suitable for tests that need to
+// select pure worker nodes for workload placement (like MachineConfigPool assignments).
+func GetReadySchedulableWorkerNodes(ctx context.Context, client kubernetes.Interface) ([]corev1.Node, error) {
+	// Get all nodes
+	allNodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	var schedulableWorkerNodes []corev1.Node
+	for _, node := range allNodes.Items {
+		// Skip if node is not ready
+		ready := false
+		for _, condition := range node.Status.Conditions {
+			if condition.Type == corev1.NodeReady && condition.Status == corev1.ConditionTrue {
+				ready = true
+				break
+			}
+		}
+		if !ready {
+			continue
+		}
+
+		// Skip if node has NoSchedule or NoExecute taints
+		hasUnschedulableTaint := false
+		for _, taint := range node.Spec.Taints {
+			if taint.Effect == corev1.TaintEffectNoSchedule || taint.Effect == corev1.TaintEffectNoExecute {
+				hasUnschedulableTaint = true
+				break
+			}
+		}
+		if hasUnschedulableTaint {
+			continue
+		}
+
+		// Skip if node has control-plane or master role (we want pure worker nodes)
+		_, hasControlPlane := node.Labels["node-role.kubernetes.io/control-plane"]
+		_, hasMaster := node.Labels["node-role.kubernetes.io/master"]
+		if hasControlPlane || hasMaster {
+			continue
+		}
+
+		// Only include if node has worker role
+		if _, hasWorker := node.Labels["node-role.kubernetes.io/worker"]; hasWorker {
+			schedulableWorkerNodes = append(schedulableWorkerNodes, node)
+		}
+	}
+
+	return schedulableWorkerNodes, nil
+}