From 9126faa5a9fe95a0f6830a93b8164cd5f0fb9227 Mon Sep 17 00:00:00 2001
From: Mike Rostermund
Date: Tue, 24 Mar 2020 21:13:05 +0100
Subject: [PATCH 1/3] Handle changes to image versions

This simply deletes all pods running container image versions that don't
match our desired version, and relies on the ensurePodsExist method to
recreate the pods with the correct version.
---
 README.md                                     |  4 +-
 .../core.humio.com_humioclusters_crd.yaml     | 10 ++--
 ...re.humio.com_v1alpha1_humiocluster_cr.yaml |  3 +-
 hack/restart-k8s.sh                           |  4 +-
 pkg/apis/core/v1alpha1/humiocluster_types.go  |  4 +-
 .../humiocluster/cluster_auth_test.go         |  3 +-
 pkg/controller/humiocluster/defaults.go       |  3 +-
 .../humiocluster/humiocluster_controller.go   | 57 ++++++++++++++++++-
 .../humiocluster_controller_test.go           |  3 +-
 pkg/controller/humiocluster/pods.go           | 12 +++-
 10 files changed, 80 insertions(+), 23 deletions(-)

diff --git a/README.md b/README.md
index 79aa58919..3e8d355f6 100644
--- a/README.md
+++ b/README.md
@@ -23,8 +23,6 @@ kind: HumioCluster
 metadata:
   name: humiocluster-sample
 spec:
-  image: humio/humio-core
-  version: "1.9.0"
+  image: "humio/humio-core:1.9.1"
   targetReplicationFactor: 2
 ```
-
diff --git a/deploy/crds/core.humio.com_humioclusters_crd.yaml b/deploy/crds/core.humio.com_humioclusters_crd.yaml
index e1bf9eb13..a1a276f12 100644
--- a/deploy/crds/core.humio.com_humioclusters_crd.yaml
+++ b/deploy/crds/core.humio.com_humioclusters_crd.yaml
@@ -133,7 +133,7 @@ spec:
                 type: object
               type: array
             image:
-              description: Desired container image
+              description: Desired container image including the image tag
               type: string
             nodeCount:
              description: Desired number of nodes
@@ -144,9 +144,6 @@ spec:
             targetReplicationFactor:
              description: Desired number of replicas of both storage and ingest partitions
              type: integer
-            version:
-              description: Desired version of Humio nodes
-              type: string
           type: object
         status:
           description: HumioClusterStatus defines the observed state of HumioCluster
@@ -154,6 +151,11 @@ spec:
             allDataAvailable:
              description: Current state set by operator.
              type: string
+            clusterState:
+              description: 'ClusterState will be empty before the cluster is bootstrapped.
+                From there it can be "Bootstrapping" or "Operational" TODO: other
+                states?'
+              type: string
             stateLastUpdated:
              format: int64
              type: integer
diff --git a/deploy/crds/core.humio.com_v1alpha1_humiocluster_cr.yaml b/deploy/crds/core.humio.com_v1alpha1_humiocluster_cr.yaml
index 26e09fda5..9cce7ef24 100644
--- a/deploy/crds/core.humio.com_v1alpha1_humiocluster_cr.yaml
+++ b/deploy/crds/core.humio.com_v1alpha1_humiocluster_cr.yaml
@@ -3,8 +3,7 @@ kind: HumioCluster
 metadata:
   name: example-humiocluster
 spec:
-  image: humio/humio-core
-  version: "1.9.0"
+  image: "humio/humio-core:1.9.1"
   targetReplicationFactor: 2
   storagePartitionsCount: 24
   environmentVariables:
diff --git a/hack/restart-k8s.sh b/hack/restart-k8s.sh
index 3d904fd78..fb2f29f22 100755
--- a/hack/restart-k8s.sh
+++ b/hack/restart-k8s.sh
@@ -29,8 +29,8 @@ kind load docker-image --name kind docker.io/confluentinc/cp-zookeeper:5.4.1
 kind load docker-image --name kind solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143
 
 # Pre-load humio images
-docker pull humio/humio-core:1.9.0
-kind load docker-image --name kind humio/humio-core:1.9.0
+docker pull humio/humio-core:1.9.1
+kind load docker-image --name kind humio/humio-core:1.9.1
 
 # Use helm 3 to start up Kafka and Zookeeper
 mkdir ~/git
diff --git a/pkg/apis/core/v1alpha1/humiocluster_types.go b/pkg/apis/core/v1alpha1/humiocluster_types.go
index 48c39ab6f..c44f3cb53 100644
--- a/pkg/apis/core/v1alpha1/humiocluster_types.go
+++ b/pkg/apis/core/v1alpha1/humiocluster_types.go
@@ -7,10 +7,8 @@ import (
 
 // HumioClusterSpec defines the desired state of HumioCluster
 type HumioClusterSpec struct {
-    // Desired container image
+    // Desired container image including the image tag
     Image string `json:"image,omitempty"`
-    // Desired version of Humio nodes
-    Version string `json:"version,omitempty"`
     // Desired number of replicas of both storage and ingest partitions
     TargetReplicationFactor int `json:"targetReplicationFactor,omitempty"`
     // Desired number of storage partitions
diff --git a/pkg/controller/humiocluster/cluster_auth_test.go b/pkg/controller/humiocluster/cluster_auth_test.go
index 53528717e..62e33d776 100644
--- a/pkg/controller/humiocluster/cluster_auth_test.go
+++ b/pkg/controller/humiocluster/cluster_auth_test.go
@@ -30,8 +30,7 @@ func TestGetJWTForSingleUser(t *testing.T) {
             Namespace: "logging",
         },
         Spec: corev1alpha1.HumioClusterSpec{
-            Image:                   "humio/humio-core",
-            Version:                 "1.9.0",
+            Image:                   "humio/humio-core:1.9.1",
             TargetReplicationFactor: 3,
             NodeCount:               3,
         },
diff --git a/pkg/controller/humiocluster/defaults.go b/pkg/controller/humiocluster/defaults.go
index 721983b65..6827c8ffe 100644
--- a/pkg/controller/humiocluster/defaults.go
+++ b/pkg/controller/humiocluster/defaults.go
@@ -10,8 +10,7 @@ import (
 const (
     name                    = "humiocluster"
     namespace               = "logging"
-    image                   = "humio/humio-core"
-    version                 = "1.9.0"
+    image                   = "humio/humio-core:1.9.1"
     targetReplicationFactor = 2
     storagePartitionsCount  = 24
     digestPartitionsCount   = 24
diff --git a/pkg/controller/humiocluster/humiocluster_controller.go b/pkg/controller/humiocluster/humiocluster_controller.go
index 8ccdc13d3..05d95b269 100644
--- a/pkg/controller/humiocluster/humiocluster_controller.go
+++ b/pkg/controller/humiocluster/humiocluster_controller.go
@@ -139,9 +139,16 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile.
         return reconcile.Result{}, err
     }
 
-    // Ensure pods exist. Will requeue if not all pods are created and ready
     emptyResult := reconcile.Result{}
-    result, err := r.ensurePodsExist(context.TODO(), humioCluster)
+
+    // Ensure pods that do not run the desired version are deleted.
+    result, err := r.ensureMismatchedPodVersionsAreDeleted(context.TODO(), humioCluster)
+    if result != emptyResult || err != nil {
+        return result, err
+    }
+
+    // Ensure pods exist. Will requeue if not all pods are created and ready
+    result, err = r.ensurePodsExist(context.TODO(), humioCluster)
     if result != emptyResult || err != nil {
         return result, err
     }
@@ -258,6 +265,52 @@ func (r *ReconcileHumioCluster) ensureServiceExists(context context.Context, hc
     return nil
 }
 
+// ensureMismatchedPodVersionsAreDeleted is used to delete pods whose container image does not match the desired image from the HumioCluster.
+// If a pod is deleted, this will requeue immediately and rely on the next reconciliation to delete the next pod.
+// The method only returns an empty result and no error if all pods are running the desired version,
+// and no pod is currently being deleted.
+func (r *ReconcileHumioCluster) ensureMismatchedPodVersionsAreDeleted(context context.Context, humioCluster *corev1alpha1.HumioCluster) (reconcile.Result, error) {
+    foundPodList, err := ListPods(r.client, humioCluster)
+    if err != nil {
+        return reconcile.Result{}, err
+    }
+
+    // if we do not have any pods running, we have nothing to clean up or wait for
+    if len(foundPodList) == 0 {
+        return reconcile.Result{}, nil
+    }
+
+    podBeingDeleted := false
+    for _, pod := range foundPodList {
+        // TODO: can we assume we always only have one pod?
+        // Probably not if running in a service mesh with sidecars injected.
+        // Should have a container name variable and match this here.
+
+        // only consider pods not already being deleted
+        if pod.DeletionTimestamp == nil {
+
+            // if container image versions of a pod differs, we want to delete it
+            if pod.Spec.Containers[0].Image != humioCluster.Spec.Image {
+                // TODO: figure out if we should only allow upgrades and not downgrades
+                err = DeletePod(r.client, pod)
+                if err != nil {
+                    return reconcile.Result{}, fmt.Errorf("could not delete pod %s, got err: %v", pod.Name, err)
+                }
+                return reconcile.Result{Requeue: true}, nil
+            }
+        } else {
+            podBeingDeleted = true
+        }
+
+    }
+    // if we have pods being deleted, requeue after a short delay
+    if podBeingDeleted {
+        return reconcile.Result{Requeue: true, RequeueAfter: time.Second * 10}, nil
+    }
+    // return empty result and no error indicating that everything was in the state we wanted it to be
+    return reconcile.Result{}, nil
+}
+
 // TODO: change to create 1 pod at a time, return Requeue=true and RequeueAfter.
 // check that other pods, if they exist, are in a ready state
 func (r *ReconcileHumioCluster) ensurePodsExist(conetext context.Context, humioCluster *corev1alpha1.HumioCluster) (reconcile.Result, error) {
diff --git a/pkg/controller/humiocluster/humiocluster_controller_test.go b/pkg/controller/humiocluster/humiocluster_controller_test.go
index 1326c976a..3e53e6ef8 100644
--- a/pkg/controller/humiocluster/humiocluster_controller_test.go
+++ b/pkg/controller/humiocluster/humiocluster_controller_test.go
@@ -39,8 +39,7 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) {
                 Namespace: "logging",
             },
             Spec: humioClusterv1alpha1.HumioClusterSpec{
-                Image:                   "humio/humio-core",
-                Version:                 "1.9.0",
+                Image:                   "humio/humio-core:1.9.1",
                 TargetReplicationFactor: 2,
                 StoragePartitionsCount:  3,
                 DigestPartitionsCount:   3,
diff --git a/pkg/controller/humiocluster/pods.go b/pkg/controller/humiocluster/pods.go
index d14cbe613..8a338aa68 100644
--- a/pkg/controller/humiocluster/pods.go
+++ b/pkg/controller/humiocluster/pods.go
@@ -31,7 +31,7 @@ func (r *ReconcileHumioCluster) constructPod(hc *corev1alpha1.HumioCluster) (*co
             Containers: []corev1.Container{
                 {
                     Name:  "humio",
-                    Image: fmt.Sprintf("%s:%s", hc.Spec.Image, hc.Spec.Version),
+                    Image: hc.Spec.Image,
                     Ports: []corev1.ContainerPort{
                         {
                             Name:          "http",
@@ -112,6 +112,16 @@ func ListPods(c client.Client, hc *corev1alpha1.HumioCluster) ([]corev1.Pod, err
     return foundPodList.Items, nil
 }
 
+// DeletePod deletes a given pod
+func DeletePod(c client.Client, existingPod corev1.Pod) error {
+    err := c.Delete(context.TODO(), &existingPod)
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
 func labelsForPod(clusterName string, nodeID int) map[string]string {
     labels := labelsForHumio(clusterName)
     labels["node_id"] = strconv.Itoa(nodeID)

From 6a27975e290f744e1d2944faaff7c94a820453df Mon Sep 17 00:00:00 2001
From: Mike Rostermund
Date: Tue, 24 Mar 2020 21:31:02 +0100
Subject: [PATCH 2/3] Refactor podHasLabel to be more generic

---
 pkg/controller/humiocluster/helpers.go             |  9 +++++++++
 .../humiocluster/humiocluster_controller.go        | 11 +----------
 .../humiocluster/humiocluster_controller_test.go   |  2 +-
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/pkg/controller/humiocluster/helpers.go b/pkg/controller/humiocluster/helpers.go
index be4b37005..44c07f492 100644
--- a/pkg/controller/humiocluster/helpers.go
+++ b/pkg/controller/humiocluster/helpers.go
@@ -17,3 +17,12 @@ func matchingLabelsForHumio(clusterName string) client.MatchingLabels {
     matchingLabels = labelsForHumio(clusterName)
     return matchingLabels
 }
+
+func labelListContainsLabel(labelList map[string]string, label string) bool {
+    for labelName := range labelList {
+        if labelName == label {
+            return true
+        }
+    }
+    return false
+}
diff --git a/pkg/controller/humiocluster/humiocluster_controller.go b/pkg/controller/humiocluster/humiocluster_controller.go
index 05d95b269..daeede7cd 100644
--- a/pkg/controller/humiocluster/humiocluster_controller.go
+++ b/pkg/controller/humiocluster/humiocluster_controller.go
@@ -191,7 +191,7 @@ func (r *ReconcileHumioCluster) ensurePodLabels(context context.Context, hc *cor
 
     for _, pod := range foundPodList {
         // Skip pods that already have a label
-        if podHasLabel(pod.GetLabels(), "node_id") {
+        if labelListContainsLabel(pod.GetLabels(), "node_id") {
             continue
         }
         // If pod does not have an IP yet it is probably pending
@@ -215,15 +215,6 @@ func (r *ReconcileHumioCluster) ensurePodLabels(context context.Context, hc *cor
     return nil
 }
 
-func podHasLabel(labels map[string]string, label string) bool {
-    for labelName := range labels {
-        if labelName == label {
-            return true
-        }
-    }
-    return false
-}
-
 func (r *ReconcileHumioCluster) ensurePartitionsAreBalanced(humioClusterController humio.ClusterController, hc *corev1alpha1.HumioCluster) error {
     partitionsBalanced, err := humioClusterController.AreStoragePartitionsBalanced(hc)
     if err != nil {
diff --git a/pkg/controller/humiocluster/humiocluster_controller_test.go b/pkg/controller/humiocluster/humiocluster_controller_test.go
index 3e53e6ef8..6fe6f1a05 100644
--- a/pkg/controller/humiocluster/humiocluster_controller_test.go
+++ b/pkg/controller/humiocluster/humiocluster_controller_test.go
@@ -254,7 +254,7 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) {
 
             // Ensure that we add node_id label to all pods
             for _, pod := range foundPodList {
-                if !podHasLabel(pod.GetLabels(), "node_id") {
+                if !labelListContainsLabel(pod.GetLabels(), "node_id") {
                     t.Errorf("expected pod %s to have label node_id", pod.Name)
                 }
             }

From 7b475cb9210adc54d1d7f8c6b3033e825112a411 Mon Sep 17 00:00:00 2001
From: Jestin Woods
Date: Wed, 25 Mar 2020 11:56:19 -0700
Subject: [PATCH 3/3] Add test for image update

---
 .../humiocluster/humiocluster_controller.go   |  17 +-
 .../humiocluster_controller_test.go           | 201 +++++++++++++++++-
 2 files changed, 212 insertions(+), 6 deletions(-)

diff --git a/pkg/controller/humiocluster/humiocluster_controller.go b/pkg/controller/humiocluster/humiocluster_controller.go
index daeede7cd..116951d2b 100644
--- a/pkg/controller/humiocluster/humiocluster_controller.go
+++ b/pkg/controller/humiocluster/humiocluster_controller.go
@@ -128,9 +128,10 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile.
     // Set defaults
     setDefaults(humioCluster)
 
-    // Set cluster status
+    // Assume we are bootstrapping if no cluster state is set.
+    // TODO: this is a workaround for the issue where humio pods cannot start up at the same time during the first boot
     if humioCluster.Status.ClusterState == "" {
-        humioCluster.Status.ClusterState = "Bootstrapping"
+        r.setClusterStatus(context.TODO(), "Bootstrapping", humioCluster)
     }
 
     // Ensure developer password is a k8s secret
@@ -153,6 +154,8 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile.
         return result, err
     }
 
+    r.setClusterStatus(context.TODO(), "Running", humioCluster)
+
     // Ensure service exists
     err = r.ensureServiceExists(context.TODO(), humioCluster)
     if err != nil {
         return reconcile.Result{}, err
     }
@@ -180,6 +183,13 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile.
     return reconcile.Result{Requeue: true, RequeueAfter: time.Second * 30}, nil
 }
 
+// setClusterStatus is used to change the cluster status
+// TODO: we use this to determine if we should have a delay between startup of humio pods during bootstrap vs starting up pods during an image update
+func (r *ReconcileHumioCluster) setClusterStatus(context context.Context, clusterState string, humioCluster *corev1alpha1.HumioCluster) error {
+    humioCluster.Status.ClusterState = clusterState
+    return r.client.Update(context, humioCluster)
+}
+
 func (r *ReconcileHumioCluster) ensurePodLabels(context context.Context, hc *corev1alpha1.HumioCluster) error {
     r.logger.Info("ensuring pod labels")
     cluster, err := r.humioClient.GetClusters()
@@ -283,6 +293,7 @@ func (r *ReconcileHumioCluster) ensureMismatchedPodVersionsAreDeleted(context c
             // if container image versions of a pod differs, we want to delete it
             if pod.Spec.Containers[0].Image != humioCluster.Spec.Image {
                 // TODO: figure out if we should only allow upgrades and not downgrades
+                r.logger.Info(fmt.Sprintf("deleting pod %s", pod.Name))
                 err = DeletePod(r.client, pod)
                 if err != nil {
                     return reconcile.Result{}, fmt.Errorf("could not delete pod %s, got err: %v", pod.Name, err)
                 }
@@ -330,7 +341,7 @@ func (r *ReconcileHumioCluster) ensurePodsExist(conetext context.Context, humioC
         return reconcile.Result{}, nil
     }
 
-    if podsNotReadyCount > 0 {
+    if podsNotReadyCount > 0 && humioCluster.Status.ClusterState == "Bootstrapping" {
         r.logger.Info(fmt.Sprintf("there are %d humio pods that are not ready. all humio pods must report ready before reconciliation can continue", podsNotReadyCount))
         return reconcile.Result{Requeue: true, RequeueAfter: time.Second * 5}, nil
     }
diff --git a/pkg/controller/humiocluster/humiocluster_controller_test.go b/pkg/controller/humiocluster/humiocluster_controller_test.go
index 6fe6f1a05..0d786f02e 100644
--- a/pkg/controller/humiocluster/humiocluster_controller_test.go
+++ b/pkg/controller/humiocluster/humiocluster_controller_test.go
@@ -156,10 +156,8 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) {
 
             for nodeCount := 0; nodeCount < tt.humioCluster.Spec.NodeCount; nodeCount++ {
                 var foundPodList corev1.PodList
-                var matchingLabels client.MatchingLabels
 
-                matchingLabels = labelsForHumio(tt.humioCluster.Name)
-                cl.List(context.TODO(), &foundPodList, client.InNamespace(tt.humioCluster.Namespace), matchingLabels)
+                cl.List(context.TODO(), &foundPodList, client.InNamespace(tt.humioCluster.Namespace), matchingLabelsForHumio(tt.humioCluster.Name))
 
                 if len(foundPodList.Items) != nodeCount+1 {
                     t.Errorf("expected list pods to return equal to %d, got %d", nodeCount+1, len(foundPodList.Items))
@@ -187,6 +185,7 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) {
             if err != nil {
                 t.Errorf("reconcile: (%v)", err)
             }
+
         }
 
         // Check that the service exists
@@ -261,3 +260,199 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) {
         })
     }
 }
+
+func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) {
+    // Set the logger to development mode for verbose logs.
+    logf.SetLogger(logf.ZapLogger(true))
+
+    tests := []struct {
+        name          string
+        humioCluster  *humioClusterv1alpha1.HumioCluster
+        imageToUpdate string
+    }{
+        {
+            "test simple cluster humio image update",
+            &humioClusterv1alpha1.HumioCluster{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "humiocluster",
+                    Namespace: "logging",
+                },
+                Spec: humioClusterv1alpha1.HumioClusterSpec{
+                    Image:                   "humio/humio-core:1.9.1",
+                    TargetReplicationFactor: 2,
+                    StoragePartitionsCount:  3,
+                    DigestPartitionsCount:   3,
+                    NodeCount:               3,
+                },
+            },
+            "humio/humio-core:1.9.2",
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+
+            // Objects to track in the fake client.
+            objs := []runtime.Object{
+                tt.humioCluster,
+            }
+
+            // Register operator types with the runtime scheme.
+            s := scheme.Scheme
+            s.AddKnownTypes(humioClusterv1alpha1.SchemeGroupVersion, tt.humioCluster)
+
+            // Create a fake client to mock API calls.
+            cl := fake.NewFakeClient(objs...)
+
+            // Start up http server that can send the mock jwt token
+            server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+                rw.Write([]byte(`{"token": "sometempjwttoken"}`))
+            }))
+            defer server.Close()
+
+            // TODO: create this above when we add more test cases
+            storagePartitions := []humioapi.StoragePartition{
+                humioapi.StoragePartition{
+                    Id:      1,
+                    NodeIds: []int{0},
+                },
+                humioapi.StoragePartition{
+                    Id:      2,
+                    NodeIds: []int{0},
+                },
+                humioapi.StoragePartition{
+                    Id:      3,
+                    NodeIds: []int{0},
+                },
+            }
+            ingestPartitions := []humioapi.IngestPartition{
+                humioapi.IngestPartition{
+                    Id:      1,
+                    NodeIds: []int{0},
+                },
+                humioapi.IngestPartition{
+                    Id:      2,
+                    NodeIds: []int{0},
+                },
+                humioapi.IngestPartition{
+                    Id:      3,
+                    NodeIds: []int{0},
+                },
+            }
+            humioClient := humio.NewMocklient(
+                humioapi.Cluster{
+                    Nodes: []humioapi.ClusterNode{
+                        humioapi.ClusterNode{
+                            Uri:         fmt.Sprintf("http://192.168.0.%d:8080", 0),
+                            Id:          0,
+                            IsAvailable: true,
+                        },
+                        humioapi.ClusterNode{
+                            Uri:         fmt.Sprintf("http://192.168.0.%d:8080", 1),
+                            Id:          1,
+                            IsAvailable: true,
+                        },
+                        humioapi.ClusterNode{
+                            Uri:         fmt.Sprintf("http://192.168.0.%d:8080", 2),
+                            Id:          2,
+                            IsAvailable: true,
+                        },
+                    },
+                    StoragePartitions: storagePartitions,
+                    IngestPartitions:  ingestPartitions,
+                }, nil, nil, nil, fmt.Sprintf("%s/", server.URL))
+
+            // Create a ReconcileHumioCluster object with the scheme and fake client.
+            r := &ReconcileHumioCluster{
+                client:      cl,
+                humioClient: humioClient,
+                scheme:      s,
+            }
+
+            // Mock request to simulate Reconcile() being called on an event for a
+            // watched resource.
+            req := reconcile.Request{
+                NamespacedName: types.NamespacedName{
+                    Name:      tt.humioCluster.Name,
+                    Namespace: tt.humioCluster.Namespace,
+                },
+            }
+            _, err := r.Reconcile(req)
+            if err != nil {
+                t.Errorf("reconcile: (%v)", err)
+            }
+
+            for nodeCount := 0; nodeCount < tt.humioCluster.Spec.NodeCount; nodeCount++ {
+                var foundPodList corev1.PodList
+                var matchingLabels client.MatchingLabels
+
+                matchingLabels = labelsForHumio(tt.humioCluster.Name)
+                cl.List(context.TODO(), &foundPodList, client.InNamespace(tt.humioCluster.Namespace), matchingLabels)
+
+                if len(foundPodList.Items) != nodeCount+1 {
+                    t.Errorf("expected list pods to return equal to %d, got %d", nodeCount+1, len(foundPodList.Items))
+                }
+
+                // We must update the IP address because when we attempt to add labels to the pod we validate that they have IP addresses first
+                // We also must update the ready condition as the reconciler will wait until all pods are ready before continuing
+                for nodeID, pod := range foundPodList.Items {
+                    //pod.Name = fmt.Sprintf("%s-core-somesuffix%d", tt.humioCluster.Name, nodeID)
+                    pod.Status.PodIP = fmt.Sprintf("192.168.0.%d", nodeID)
+                    pod.Status.Conditions = []corev1.PodCondition{
+                        corev1.PodCondition{
+                            Type:   corev1.PodConditionType("Ready"),
+                            Status: corev1.ConditionTrue,
+                        },
+                    }
+                    err := cl.Status().Update(context.TODO(), &pod)
+                    if err != nil {
+                        t.Errorf("failed to update pods to prepare for testing the labels: %s", err)
+                    }
+                }
+
+                // Reconcile again so Reconcile() checks pods and updates the HumioCluster resources' Status.
+                _, err = r.Reconcile(req)
+                if err != nil {
+                    t.Errorf("reconcile: (%v)", err)
+                }
+            }
+
+            // Update humio image
+            tt.humioCluster.Spec.Image = tt.imageToUpdate
+            cl.Update(context.TODO(), tt.humioCluster)
+
+            for nodeCount := 0; nodeCount < tt.humioCluster.Spec.NodeCount; nodeCount++ {
+                res, err := r.Reconcile(req)
+                if err != nil {
+                    t.Errorf("reconcile: (%v)", err)
+                }
+                if res != (reconcile.Result{Requeue: true}) {
+                    t.Errorf("reconcile did not match expected %v", res)
+                }
+            }
+
+            var foundPodList corev1.PodList
+
+            cl.List(context.TODO(), &foundPodList, client.InNamespace(tt.humioCluster.Namespace), matchingLabelsForHumio(tt.humioCluster.Name))
+
+            if len(foundPodList.Items) != 0 {
+                t.Errorf("expected list pods to return equal to %d, got %d", 0, len(foundPodList.Items))
+            }
+
+            for nodeCount := 0; nodeCount < tt.humioCluster.Spec.NodeCount; nodeCount++ {
+                res, err := r.Reconcile(req)
+                if err != nil {
+                    t.Errorf("reconcile: (%v)", err)
+                }
+                if res != (reconcile.Result{Requeue: true}) {
+                    t.Errorf("reconcile did not match expected %v", res)
+                }
+            }
+
+            cl.List(context.TODO(), &foundPodList, client.InNamespace(tt.humioCluster.Namespace), matchingLabelsForHumio(tt.humioCluster.Name))
+
+            if len(foundPodList.Items) != tt.humioCluster.Spec.NodeCount {
+                t.Errorf("expected list pods to return equal to %d, got %d", tt.humioCluster.Spec.NodeCount, len(foundPodList.Items))
+            }
+        })
+    }
+}
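
A quick usage sketch of the behaviour this series adds: once spec.image is changed on a HumioCluster resource, the operator deletes pods running a mismatched image one at a time and relies on ensurePodsExist to recreate them with the new image. The commands below assume the example-humiocluster resource from deploy/crds/core.humio.com_v1alpha1_humiocluster_cr.yaml is applied in the current namespace and that the operator is running; the 1.9.2 tag is only taken from the new test above as an illustration:

    kubectl patch humiocluster example-humiocluster --type=merge \
      -p '{"spec":{"image":"humio/humio-core:1.9.2"}}'

    # watch the pods being replaced one at a time
    kubectl get pods -w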