From 1bb52e9bcdb19f0b2e88d156a565ae6aabb4403a Mon Sep 17 00:00:00 2001 From: Jestin Woods Date: Thu, 26 Mar 2020 12:17:07 -0700 Subject: [PATCH 1/6] Fix cluster state issues and pod creation --- .../humiocluster/humiocluster_controller.go | 49 +++++++++++-- .../humiocluster_controller_test.go | 72 +++++++++++++++---- pkg/controller/humiocluster/pods.go | 4 +- 3 files changed, 103 insertions(+), 22 deletions(-) diff --git a/pkg/controller/humiocluster/humiocluster_controller.go b/pkg/controller/humiocluster/humiocluster_controller.go index 116951d2b..d57532586 100644 --- a/pkg/controller/humiocluster/humiocluster_controller.go +++ b/pkg/controller/humiocluster/humiocluster_controller.go @@ -131,7 +131,7 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile. // Assume we are bootstrapping if no cluster state is set. // TODO: this is a workaround for the issue where humio pods cannot start up at the same time during the first boot if humioCluster.Status.ClusterState == "" { - r.setClusterStatus(context.TODO(), "Boostrapping", humioCluster) + r.setClusterStatus(context.TODO(), "Bootstrapping", humioCluster) } // Ensure developer password is a k8s secret @@ -149,13 +149,20 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile. } // Ensure pods exist. Will requeue if not all pods are created and ready + if humioCluster.Status.ClusterState == "Bootstrapping" { + result, err = r.ensurePodsBootstrapped(context.TODO(), humioCluster) + if result != emptyResult || err != nil { + return result, err + } + } + + r.setClusterStatus(context.TODO(), "Running", humioCluster) + result, err = r.ensurePodsExist(context.TODO(), humioCluster) if result != emptyResult || err != nil { return result, err } - r.setClusterStatus(context.TODO(), "Running", humioCluster) - // Ensure service exists err = r.ensureServiceExists(context.TODO(), humioCluster) if err != nil { @@ -173,6 +180,7 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile. return reconcile.Result{}, err } + // TODO: wait until all pods are ready before continuing clusterController := humio.NewClusterController(r.humioClient) err = r.ensurePartitionsAreBalanced(*clusterController, humioCluster) if err != nil { @@ -187,7 +195,7 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile. // TODO: we use this to determine if we should have a delay between startup of humio pods during bootstrap vs starting up pods during an image update func (r *ReconcileHumioCluster) setClusterStatus(context context.Context, clusterState string, humioCluster *corev1alpha1.HumioCluster) error { humioCluster.Status.ClusterState = clusterState - return r.client.Update(context, humioCluster) + return r.client.Status().Update(context, humioCluster) } func (r *ReconcileHumioCluster) ensurePodLabels(context context.Context, hc *corev1alpha1.HumioCluster) error { @@ -315,7 +323,7 @@ func (r *ReconcileHumioCluster) ensureMismatchedPodVersionsAreDeleted(conetext c // TODO: change to create 1 pod at a time, return Requeue=true and RequeueAfter. // check that other pods, if they exist, are in a ready state -func (r *ReconcileHumioCluster) ensurePodsExist(conetext context.Context, humioCluster *corev1alpha1.HumioCluster) (reconcile.Result, error) { +func (r *ReconcileHumioCluster) ensurePodsBootstrapped(conetext context.Context, humioCluster *corev1alpha1.HumioCluster) (reconcile.Result, error) { // Ensure we have pods for the defined NodeCount. 
// If scaling down, we will handle the extra/obsolete pods later. foundPodList, err := ListPods(r.client, humioCluster) @@ -341,7 +349,7 @@ func (r *ReconcileHumioCluster) ensurePodsExist(conetext context.Context, humioC return reconcile.Result{}, nil } - if podsNotReadyCount > 0 && humioCluster.Status.ClusterState == "Bootstrapping" { + if podsNotReadyCount > 0 { r.logger.Info(fmt.Sprintf("there are %d humio pods that are not ready. all humio pods must report ready before reconciliation can continue", podsNotReadyCount)) return reconcile.Result{Requeue: true, RequeueAfter: time.Second * 5}, nil } @@ -367,6 +375,35 @@ func (r *ReconcileHumioCluster) ensurePodsExist(conetext context.Context, humioC return reconcile.Result{}, nil } +func (r *ReconcileHumioCluster) ensurePodsExist(conetext context.Context, humioCluster *corev1alpha1.HumioCluster) (reconcile.Result, error) { + // Ensure we have pods for the defined NodeCount. + // If scaling down, we will handle the extra/obsolete pods later. + foundPodList, err := ListPods(r.client, humioCluster) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to list pods: %s", err) + } + + if len(foundPodList) < humioCluster.Spec.NodeCount { + pod, err := r.constructPod(humioCluster) + if err != nil { + return reconcile.Result{}, fmt.Errorf("unable to construct pod for HumioCluster: %v", err) + } + + err = r.client.Create(context.TODO(), pod) + if err != nil { + log.Info(fmt.Sprintf("unable to create pod: %v", err)) + return reconcile.Result{Requeue: true, RequeueAfter: time.Second * 5}, fmt.Errorf("unable to create Pod for HumioCluster: %v", err) + } + log.Info(fmt.Sprintf("successfully created pod %s for HumioCluster %s", pod.Name, humioCluster.Name)) + metricPodsCreated.Inc() + // We have created a pod. Requeue immediately even if the pod is not ready. We will check the readiness status on the next reconciliation. + return reconcile.Result{Requeue: true}, nil + } + + // TODO: what should happen if we have more pods than are expected? 
+ return reconcile.Result{}, nil +} + // TODO: extend this (or create separate method) to take this password and perform a login, get the jwt token and then call the api to get the persistent api token and also store that as a secret // this functionality should perhaps go into humio.cluster_auth.go func (r *ReconcileHumioCluster) ensureDeveloperUserPasswordExists(conetext context.Context, humioCluster *corev1alpha1.HumioCluster) error { diff --git a/pkg/controller/humiocluster/humiocluster_controller_test.go b/pkg/controller/humiocluster/humiocluster_controller_test.go index 8d317e03a..c963eac12 100644 --- a/pkg/controller/humiocluster/humiocluster_controller_test.go +++ b/pkg/controller/humiocluster/humiocluster_controller_test.go @@ -120,8 +120,17 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) { t.Errorf("reconcile: (%v)", err) } + updatedHumioCluster := &humioClusterv1alpha1.HumioCluster{} + err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster) + if err != nil { + t.Errorf("get HumioCluster: (%v)", err) + } + if updatedHumioCluster.Status.ClusterState != "Bootstrapping" { + t.Errorf("expected cluster state to be %s but got %s", "Bootstrapping", updatedHumioCluster.Status.ClusterState) + } + // Check that the developer password exists as a k8s secret - secret, err := r.GetSecret(context.TODO(), tt.humioCluster, serviceAccountSecretName) + secret, err := r.GetSecret(context.TODO(), updatedHumioCluster, serviceAccountSecretName) if err != nil { t.Errorf("get secret with password: (%v). %+v", err, secret) } @@ -129,10 +138,10 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) { t.Errorf("password secret %s expected content to not be empty, but it was", serviceAccountSecretName) } - for nodeCount := 0; nodeCount < tt.humioCluster.Spec.NodeCount; nodeCount++ { - foundPodList, err := ListPods(cl, tt.humioCluster) - if len(foundPodList) != nodeCount+1 { - t.Errorf("expected list pods to return equal to %d, got %d", nodeCount+1, len(foundPodList)) + for nodeCount := 1; nodeCount <= tt.humioCluster.Spec.NodeCount; nodeCount++ { + foundPodList, err := ListPods(cl, updatedHumioCluster) + if len(foundPodList) != nodeCount { + t.Errorf("expected list pods to return equal to %d, got %d", nodeCount, len(foundPodList)) } // We must update the IP address because when we attempt to add labels to the pod we validate that they have IP addresses first @@ -147,18 +156,36 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) { if err != nil { t.Errorf("reconcile: (%v)", err) } + } + // Check that we do not create more than expected number of humio pods + res, err = r.Reconcile(req) + if err != nil { + t.Errorf("reconcile: (%v)", err) + } + foundPodList, err := ListPods(cl, updatedHumioCluster) + if len(foundPodList) != tt.humioCluster.Spec.NodeCount { + t.Errorf("expected list pods to return equal to %d, got %d", tt.humioCluster.Spec.NodeCount, len(foundPodList)) + } + + updatedHumioCluster = &humioClusterv1alpha1.HumioCluster{} + err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster) + if err != nil { + t.Errorf("get HumioCluster: (%v)", err) + } + if updatedHumioCluster.Status.ClusterState != "Running" { + t.Errorf("expected cluster state to be %s but got %s", "Running", updatedHumioCluster.Status.ClusterState) } // Check that the service exists - service, err := r.GetService(context.TODO(), tt.humioCluster) + service, err := r.GetService(context.TODO(), updatedHumioCluster) if err != nil { t.Errorf("get service: (%v). 
%+v", err, service) } // Check that the persistent token exists as a k8s secret - token, err := r.GetSecret(context.TODO(), tt.humioCluster, serviceTokenSecretName) + token, err := r.GetSecret(context.TODO(), updatedHumioCluster, serviceTokenSecretName) if err != nil { t.Errorf("get secret with api token: (%v). %+v", err, token) } @@ -181,8 +208,7 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) { t.Errorf("expected api token in use to be \"%+v\", but got \"%+v\"", "mocktoken", tokenInUse) } - // Get the updated HumioCluster object. - updatedHumioCluster := &humioClusterv1alpha1.HumioCluster{} + // Get the updated HumioCluster to update it with the partitions err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster) if err != nil { t.Errorf("get HumioCluster: (%v)", err) @@ -197,7 +223,7 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) { t.Errorf("expected ingest partitions to be balanced. got %v, err %s", b, err) } - foundPodList, err := ListPods(cl, tt.humioCluster) + foundPodList, err = ListPods(cl, updatedHumioCluster) if err != nil { t.Errorf("could not list pods to validate their content: %v", err) } @@ -294,6 +320,16 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) { t.Errorf("reconcile: (%v)", err) } + updatedHumioCluster := &humioClusterv1alpha1.HumioCluster{} + err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster) + if err != nil { + t.Errorf("get HumioCluster: (%v)", err) + } + if updatedHumioCluster.Status.ClusterState != "Bootstrapping" { + t.Errorf("expected cluster state to be %s but got %s", "Bootstrapping", updatedHumioCluster.Status.ClusterState) + } + tt.humioCluster = updatedHumioCluster + for nodeCount := 0; nodeCount < tt.humioCluster.Spec.NodeCount; nodeCount++ { foundPodList, err := ListPods(cl, tt.humioCluster) if len(foundPodList) != nodeCount+1 { @@ -314,9 +350,19 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) { } } + // Test that we're in a Running state + updatedHumioCluster = &humioClusterv1alpha1.HumioCluster{} + err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster) + if err != nil { + t.Errorf("get HumioCluster: (%v)", err) + } + if updatedHumioCluster.Status.ClusterState != "Running" { + t.Errorf("expected cluster state to be %s but got %s", "Running", updatedHumioCluster.Status.ClusterState) + } + // Update humio image - tt.humioCluster.Spec.Image = tt.imageToUpdate - cl.Update(context.TODO(), tt.humioCluster) + updatedHumioCluster.Spec.Image = tt.imageToUpdate + cl.Update(context.TODO(), updatedHumioCluster) for nodeCount := 0; nodeCount < tt.humioCluster.Spec.NodeCount; nodeCount++ { res, err := r.Reconcile(req) @@ -348,7 +394,7 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) { } } - foundPodList, err = ListPods(cl, tt.humioCluster) + foundPodList, err = ListPods(cl, updatedHumioCluster) if err != nil { t.Errorf("failed to list pods: %s", err) } diff --git a/pkg/controller/humiocluster/pods.go b/pkg/controller/humiocluster/pods.go index 8a338aa68..ffb0d9afb 100644 --- a/pkg/controller/humiocluster/pods.go +++ b/pkg/controller/humiocluster/pods.go @@ -130,9 +130,7 @@ func labelsForPod(clusterName string, nodeID int) map[string]string { func generatePodSuffix() string { rand.Seed(time.Now().UnixNano()) - chars := []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ" + - "abcdefghijklmnopqrstuvwxyz" + - "0123456789") + chars := []rune("abcdefghijklmnopqrstuvwxyz") length := 6 var b strings.Builder for i := 0; i 
< length; i++ { From 69f1cbeb63b4b7b463454bf9aeeb581d80141bce Mon Sep 17 00:00:00 2001 From: Jestin Woods Date: Thu, 26 Mar 2020 15:58:07 -0700 Subject: [PATCH 2/6] Make cluster state a variable in the spec --- pkg/apis/core/v1alpha1/humiocluster_types.go | 7 +++ .../humiocluster/humiocluster_controller.go | 6 +-- .../humiocluster_controller_test.go | 46 +++++++++---------- 3 files changed, 33 insertions(+), 26 deletions(-) diff --git a/pkg/apis/core/v1alpha1/humiocluster_types.go b/pkg/apis/core/v1alpha1/humiocluster_types.go index c44f3cb53..39540a7da 100644 --- a/pkg/apis/core/v1alpha1/humiocluster_types.go +++ b/pkg/apis/core/v1alpha1/humiocluster_types.go @@ -5,6 +5,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + // HumioClusterStateBoostrapping is the Bootstrapping state of the cluster + HumioClusterStateBoostrapping = "Bootstrapping" + // HumioClusterStateRunning is the Running state of the cluster + HumioClusterStateRunning = "Running" +) + // HumioClusterSpec defines the desired state of HumioCluster type HumioClusterSpec struct { // Desired container image including the image tag diff --git a/pkg/controller/humiocluster/humiocluster_controller.go b/pkg/controller/humiocluster/humiocluster_controller.go index d57532586..ffa141010 100644 --- a/pkg/controller/humiocluster/humiocluster_controller.go +++ b/pkg/controller/humiocluster/humiocluster_controller.go @@ -131,7 +131,7 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile. // Assume we are bootstrapping if no cluster state is set. // TODO: this is a workaround for the issue where humio pods cannot start up at the same time during the first boot if humioCluster.Status.ClusterState == "" { - r.setClusterStatus(context.TODO(), "Bootstrapping", humioCluster) + r.setClusterStatus(context.TODO(), corev1alpha1.HumioClusterStateBoostrapping, humioCluster) } // Ensure developer password is a k8s secret @@ -149,14 +149,14 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile. } // Ensure pods exist. 
Will requeue if not all pods are created and ready - if humioCluster.Status.ClusterState == "Bootstrapping" { + if humioCluster.Status.ClusterState == corev1alpha1.HumioClusterStateBoostrapping { result, err = r.ensurePodsBootstrapped(context.TODO(), humioCluster) if result != emptyResult || err != nil { return result, err } } - r.setClusterStatus(context.TODO(), "Running", humioCluster) + r.setClusterStatus(context.TODO(), corev1alpha1.HumioClusterStateRunning, humioCluster) result, err = r.ensurePodsExist(context.TODO(), humioCluster) if result != emptyResult || err != nil { diff --git a/pkg/controller/humiocluster/humiocluster_controller_test.go b/pkg/controller/humiocluster/humiocluster_controller_test.go index c963eac12..b23415169 100644 --- a/pkg/controller/humiocluster/humiocluster_controller_test.go +++ b/pkg/controller/humiocluster/humiocluster_controller_test.go @@ -9,7 +9,7 @@ import ( "time" humioapi "github.com/humio/cli/api" - humioClusterv1alpha1 "github.com/humio/humio-operator/pkg/apis/core/v1alpha1" + corev1alpha1 "github.com/humio/humio-operator/pkg/apis/core/v1alpha1" "github.com/humio/humio-operator/pkg/humio" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,17 +28,17 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) { tests := []struct { name string - humioCluster *humioClusterv1alpha1.HumioCluster + humioCluster *corev1alpha1.HumioCluster humioClient *humio.MockClientConfig }{ { "test simple cluster reconciliation", - &humioClusterv1alpha1.HumioCluster{ + &corev1alpha1.HumioCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "humiocluster", Namespace: "logging", }, - Spec: humioClusterv1alpha1.HumioClusterSpec{ + Spec: corev1alpha1.HumioClusterSpec{ Image: "humio/humio-core:1.9.1", TargetReplicationFactor: 2, StoragePartitionsCount: 3, @@ -55,12 +55,12 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) { }, { "test large cluster reconciliation", - &humioClusterv1alpha1.HumioCluster{ + &corev1alpha1.HumioCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "humiocluster", Namespace: "logging", }, - Spec: humioClusterv1alpha1.HumioClusterSpec{ + Spec: corev1alpha1.HumioClusterSpec{ Image: "humio/humio-core:1.9.1", TargetReplicationFactor: 3, StoragePartitionsCount: 72, @@ -86,7 +86,7 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) { // Register operator types with the runtime scheme. s := scheme.Scheme - s.AddKnownTypes(humioClusterv1alpha1.SchemeGroupVersion, tt.humioCluster) + s.AddKnownTypes(corev1alpha1.SchemeGroupVersion, tt.humioCluster) // Create a fake client to mock API calls. cl := fake.NewFakeClient(objs...) 
@@ -120,13 +120,13 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) { t.Errorf("reconcile: (%v)", err) } - updatedHumioCluster := &humioClusterv1alpha1.HumioCluster{} + updatedHumioCluster := &corev1alpha1.HumioCluster{} err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster) if err != nil { t.Errorf("get HumioCluster: (%v)", err) } - if updatedHumioCluster.Status.ClusterState != "Bootstrapping" { - t.Errorf("expected cluster state to be %s but got %s", "Bootstrapping", updatedHumioCluster.Status.ClusterState) + if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateBoostrapping { + t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateBoostrapping, updatedHumioCluster.Status.ClusterState) } // Check that the developer password exists as a k8s secret @@ -168,13 +168,13 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) { t.Errorf("expected list pods to return equal to %d, got %d", tt.humioCluster.Spec.NodeCount, len(foundPodList)) } - updatedHumioCluster = &humioClusterv1alpha1.HumioCluster{} + updatedHumioCluster = &corev1alpha1.HumioCluster{} err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster) if err != nil { t.Errorf("get HumioCluster: (%v)", err) } - if updatedHumioCluster.Status.ClusterState != "Running" { - t.Errorf("expected cluster state to be %s but got %s", "Running", updatedHumioCluster.Status.ClusterState) + if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateRunning { + t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateRunning, updatedHumioCluster.Status.ClusterState) } // Check that the service exists @@ -248,18 +248,18 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) { tests := []struct { name string - humioCluster *humioClusterv1alpha1.HumioCluster + humioCluster *corev1alpha1.HumioCluster humioClient *humio.MockClientConfig imageToUpdate string }{ { "test simple cluster humio image update", - &humioClusterv1alpha1.HumioCluster{ + &corev1alpha1.HumioCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "humiocluster", Namespace: "logging", }, - Spec: humioClusterv1alpha1.HumioClusterSpec{ + Spec: corev1alpha1.HumioClusterSpec{ Image: "humio/humio-core:1.9.1", TargetReplicationFactor: 2, StoragePartitionsCount: 3, @@ -286,7 +286,7 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) { // Register operator types with the runtime scheme. s := scheme.Scheme - s.AddKnownTypes(humioClusterv1alpha1.SchemeGroupVersion, tt.humioCluster) + s.AddKnownTypes(corev1alpha1.SchemeGroupVersion, tt.humioCluster) // Create a fake client to mock API calls. cl := fake.NewFakeClient(objs...) 
@@ -320,13 +320,13 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) { t.Errorf("reconcile: (%v)", err) } - updatedHumioCluster := &humioClusterv1alpha1.HumioCluster{} + updatedHumioCluster := &corev1alpha1.HumioCluster{} err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster) if err != nil { t.Errorf("get HumioCluster: (%v)", err) } - if updatedHumioCluster.Status.ClusterState != "Bootstrapping" { - t.Errorf("expected cluster state to be %s but got %s", "Bootstrapping", updatedHumioCluster.Status.ClusterState) + if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateBoostrapping { + t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateBoostrapping, updatedHumioCluster.Status.ClusterState) } tt.humioCluster = updatedHumioCluster @@ -351,13 +351,13 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) { } // Test that we're in a Running state - updatedHumioCluster = &humioClusterv1alpha1.HumioCluster{} + updatedHumioCluster = &corev1alpha1.HumioCluster{} err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster) if err != nil { t.Errorf("get HumioCluster: (%v)", err) } - if updatedHumioCluster.Status.ClusterState != "Running" { - t.Errorf("expected cluster state to be %s but got %s", "Running", updatedHumioCluster.Status.ClusterState) + if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateRunning { + t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateRunning, updatedHumioCluster.Status.ClusterState) } // Update humio image From aa8cfa0fb16abc8e796361683a03775993311c8c Mon Sep 17 00:00:00 2001 From: Jestin Woods Date: Thu, 26 Mar 2020 16:18:53 -0700 Subject: [PATCH 3/6] Add cluster state to the get output fields --- deploy/crds/core.humio.com_humioclusters_crd.yaml | 5 +++++ pkg/apis/core/v1alpha1/humiocluster_types.go | 1 + 2 files changed, 6 insertions(+) diff --git a/deploy/crds/core.humio.com_humioclusters_crd.yaml b/deploy/crds/core.humio.com_humioclusters_crd.yaml index a1a276f12..7c62bc2df 100644 --- a/deploy/crds/core.humio.com_humioclusters_crd.yaml +++ b/deploy/crds/core.humio.com_humioclusters_crd.yaml @@ -3,6 +3,11 @@ kind: CustomResourceDefinition metadata: name: humioclusters.core.humio.com spec: + additionalPrinterColumns: + - JSONPath: .status.clusterState + description: The state of the cluster + name: State + type: string group: core.humio.com names: kind: HumioCluster diff --git a/pkg/apis/core/v1alpha1/humiocluster_types.go b/pkg/apis/core/v1alpha1/humiocluster_types.go index 39540a7da..dfe723bef 100644 --- a/pkg/apis/core/v1alpha1/humiocluster_types.go +++ b/pkg/apis/core/v1alpha1/humiocluster_types.go @@ -43,6 +43,7 @@ type HumioClusterStatus struct { // HumioCluster is the Schema for the humioclusters API // +kubebuilder:subresource:status // +kubebuilder:resource:path=humioclusters,scope=Namespaced +// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.clusterState",description="The state of the cluster" type HumioCluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` From ea60424e4ff2cf93607add886d231fa6dd1535d0 Mon Sep 17 00:00:00 2001 From: Jestin Woods Date: Thu, 26 Mar 2020 17:17:37 -0700 Subject: [PATCH 4/6] Update tt.humioCluster to the updatedCluster in a couple places --- pkg/controller/humiocluster/humiocluster_controller_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/pkg/controller/humiocluster/humiocluster_controller_test.go b/pkg/controller/humiocluster/humiocluster_controller_test.go
index b23415169..bb5a1809f 100644
--- a/pkg/controller/humiocluster/humiocluster_controller_test.go
+++ b/pkg/controller/humiocluster/humiocluster_controller_test.go
@@ -331,7 +331,7 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) {
 	tt.humioCluster = updatedHumioCluster

 	for nodeCount := 0; nodeCount < tt.humioCluster.Spec.NodeCount; nodeCount++ {
-		foundPodList, err := ListPods(cl, tt.humioCluster)
+		foundPodList, err := ListPods(cl, updatedHumioCluster)
 		if len(foundPodList) != nodeCount+1 {
 			t.Errorf("expected list pods to return equal to %d, got %d", nodeCount+1, len(foundPodList))
 		}
@@ -375,7 +375,7 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) {
 	}

 	// Ensure all the pods are shut down to prep for the image update
-	foundPodList, err := ListPods(cl, tt.humioCluster)
+	foundPodList, err := ListPods(cl, updatedHumioCluster)
 	if err != nil {
 		t.Errorf("failed to list pods: %s", err)
 	}

From 9207f632106cec88229932ea10b19c82262e610a Mon Sep 17 00:00:00 2001
From: Jestin Woods
Date: Thu, 26 Mar 2020 17:13:04 -0700
Subject: [PATCH 5/6] Add version and node count to the cluster state

---
 .../core.humio.com_humioclusters_crd.yaml     | 17 ++++++++--
 pkg/apis/core/v1alpha1/humiocluster_types.go  |  8 ++++-
 .../humiocluster/humiocluster_controller.go   | 30 ++++++++++++++---
 .../humiocluster_controller_test.go           | 32 ++++++++++++++++++-
 4 files changed, 79 insertions(+), 8 deletions(-)

diff --git a/deploy/crds/core.humio.com_humioclusters_crd.yaml b/deploy/crds/core.humio.com_humioclusters_crd.yaml
index 7c62bc2df..939c675e6 100644
--- a/deploy/crds/core.humio.com_humioclusters_crd.yaml
+++ b/deploy/crds/core.humio.com_humioclusters_crd.yaml
@@ -8,6 +8,14 @@ spec:
     description: The state of the cluster
     name: State
     type: string
+  - JSONPath: .status.clusterNodeCount
+    description: The number of nodes in the cluster
+    name: Nodes
+    type: string
+  - JSONPath: .status.clusterVersion
+    description: The version of humio
+    name: Version
+    type: string
   group: core.humio.com
   names:
     kind: HumioCluster
@@ -156,10 +164,15 @@ spec:
             allDataAvailable:
               description: Current state set by operator.
               type: string
+            clusterNodeCount:
+              description: ClusterNodeCount is the number of nodes of humio running
+              type: integer
             clusterState:
               description: 'ClusterState will be empty before the cluster is bootstrapped.
-                From there it can be "Bootstrapping" or "Operational" TODO: other
-                states?'
+                From there it can be "Bootstrapping" or "Running" TODO: other states?'
+              type: string
+            clusterVersion:
+              description: ClusterVersion is the version of humio running
               type: string
             stateLastUpdated:
               format: int64
diff --git a/pkg/apis/core/v1alpha1/humiocluster_types.go b/pkg/apis/core/v1alpha1/humiocluster_types.go
index dfe723bef..0211eb742 100644
--- a/pkg/apis/core/v1alpha1/humiocluster_types.go
+++ b/pkg/apis/core/v1alpha1/humiocluster_types.go
@@ -33,9 +33,13 @@ type HumioClusterStatus struct {
 	StateLastUpdatedUnix int64 `json:"stateLastUpdated,omitempty"`
 	// Current state set by operator.
 	AllDataAvailable string `json:"allDataAvailable,omitempty"`
-	// ClusterState will be empty before the cluster is bootstrapped. From there it can be "Bootstrapping" or "Operational"
+	// ClusterState will be empty before the cluster is bootstrapped. From there it can be "Bootstrapping" or "Running"
 	// TODO: other states?
 	ClusterState string `json:"clusterState,omitempty"`
+	// ClusterVersion is the version of humio running
+	ClusterVersion string `json:"clusterVersion,omitempty"`
+	// ClusterNodeCount is the number of nodes of humio running
+	ClusterNodeCount int `json:"clusterNodeCount,omitempty"`
 }

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -44,6 +48,8 @@ type HumioClusterStatus struct {
 // +kubebuilder:subresource:status
 // +kubebuilder:resource:path=humioclusters,scope=Namespaced
 // +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.clusterState",description="The state of the cluster"
+// +kubebuilder:printcolumn:name="Nodes",type="string",JSONPath=".status.clusterNodeCount",description="The number of nodes in the cluster"
+// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.clusterVersion",description="The version of humio"
 type HumioCluster struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
diff --git a/pkg/controller/humiocluster/humiocluster_controller.go b/pkg/controller/humiocluster/humiocluster_controller.go
index ffa141010..a00514ad1 100644
--- a/pkg/controller/humiocluster/humiocluster_controller.go
+++ b/pkg/controller/humiocluster/humiocluster_controller.go
@@ -3,6 +3,7 @@ package humiocluster
 import (
 	"context"
 	"fmt"
+	"strings"
 	"time"

 	"github.com/go-logr/logr"
@@ -131,7 +132,7 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile.
 	// Assume we are bootstrapping if no cluster state is set.
 	// TODO: this is a workaround for the issue where humio pods cannot start up at the same time during the first boot
 	if humioCluster.Status.ClusterState == "" {
-		r.setClusterStatus(context.TODO(), corev1alpha1.HumioClusterStateBoostrapping, humioCluster)
+		r.setClusterState(context.TODO(), corev1alpha1.HumioClusterStateBoostrapping, humioCluster)
 	}

 	// Ensure developer password is a k8s secret
@@ -156,7 +157,18 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile.
 		}
 	}

-	r.setClusterStatus(context.TODO(), corev1alpha1.HumioClusterStateRunning, humioCluster)
+	r.setClusterState(context.TODO(), corev1alpha1.HumioClusterStateRunning, humioCluster)
+
+	defer func(context context.Context, humioCluster *corev1alpha1.HumioCluster) {
+		pods, _ := ListPods(r.client, humioCluster)
+		r.setClusterNodeCount(context, len(pods), humioCluster)
+	}(context.TODO(), humioCluster)
+
+	// TODO: get cluster version from humio api
+	defer func(context context.Context, humioClient humio.Client, humioCluster *corev1alpha1.HumioCluster) {
+		version := strings.Split(humioCluster.Spec.Image, ":")[1]
+		r.setClusterVersion(context, version, humioCluster)
+	}(context.TODO(), r.humioClient, humioCluster)

 	result, err = r.ensurePodsExist(context.TODO(), humioCluster)
 	if result != emptyResult || err != nil {
 		return result, err
@@ -191,13 +203,23 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile.
return reconcile.Result{Requeue: true, RequeueAfter: time.Second * 30}, nil } -// setClusterStatus is used to change the cluster status +// setClusterState is used to change the cluster state // TODO: we use this to determine if we should have a delay between startup of humio pods during bootstrap vs starting up pods during an image update -func (r *ReconcileHumioCluster) setClusterStatus(context context.Context, clusterState string, humioCluster *corev1alpha1.HumioCluster) error { +func (r *ReconcileHumioCluster) setClusterState(context context.Context, clusterState string, humioCluster *corev1alpha1.HumioCluster) error { humioCluster.Status.ClusterState = clusterState return r.client.Status().Update(context, humioCluster) } +func (r *ReconcileHumioCluster) setClusterVersion(context context.Context, clusterVersion string, humioCluster *corev1alpha1.HumioCluster) error { + humioCluster.Status.ClusterVersion = clusterVersion + return r.client.Status().Update(context, humioCluster) +} + +func (r *ReconcileHumioCluster) setClusterNodeCount(context context.Context, clusterNodeCount int, humioCluster *corev1alpha1.HumioCluster) error { + humioCluster.Status.ClusterNodeCount = clusterNodeCount + return r.client.Status().Update(context, humioCluster) +} + func (r *ReconcileHumioCluster) ensurePodLabels(context context.Context, hc *corev1alpha1.HumioCluster) error { r.logger.Info("ensuring pod labels") cluster, err := r.humioClient.GetClusters() diff --git a/pkg/controller/humiocluster/humiocluster_controller_test.go b/pkg/controller/humiocluster/humiocluster_controller_test.go index bb5a1809f..32938c90b 100644 --- a/pkg/controller/humiocluster/humiocluster_controller_test.go +++ b/pkg/controller/humiocluster/humiocluster_controller_test.go @@ -5,6 +5,7 @@ import ( "fmt" "net/http" "net/http/httptest" + "strings" "testing" "time" @@ -168,6 +169,7 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) { t.Errorf("expected list pods to return equal to %d, got %d", tt.humioCluster.Spec.NodeCount, len(foundPodList)) } + // Test that we have the proper status updatedHumioCluster = &corev1alpha1.HumioCluster{} err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster) if err != nil { @@ -176,6 +178,12 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) { if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateRunning { t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateRunning, updatedHumioCluster.Status.ClusterState) } + if updatedHumioCluster.Status.ClusterVersion != strings.Split(tt.humioCluster.Spec.Image, ":")[1] { + t.Errorf("expected cluster version to be %s but got %s", strings.Split(tt.humioCluster.Spec.Image, ":")[1], updatedHumioCluster.Status.ClusterVersion) + } + if updatedHumioCluster.Status.ClusterNodeCount != tt.humioCluster.Spec.NodeCount { + t.Errorf("expected node count to be %d but got %d", tt.humioCluster.Spec.NodeCount, updatedHumioCluster.Status.ClusterNodeCount) + } // Check that the service exists service, err := r.GetService(context.TODO(), updatedHumioCluster) @@ -350,7 +358,7 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) { } } - // Test that we're in a Running state + // Test that we have the proper status updatedHumioCluster = &corev1alpha1.HumioCluster{} err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster) if err != nil { @@ -359,6 +367,12 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) { if 
updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateRunning { t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateRunning, updatedHumioCluster.Status.ClusterState) } + if updatedHumioCluster.Status.ClusterVersion != strings.Split(tt.humioCluster.Spec.Image, ":")[1] { + t.Errorf("expected cluster version to be %s but got %s", strings.Split(tt.humioCluster.Spec.Image, ":")[1], updatedHumioCluster.Status.ClusterVersion) + } + if updatedHumioCluster.Status.ClusterNodeCount != tt.humioCluster.Spec.NodeCount { + t.Errorf("expected node count to be %d but got %d", tt.humioCluster.Spec.NodeCount, updatedHumioCluster.Status.ClusterNodeCount) + } // Update humio image updatedHumioCluster.Spec.Image = tt.imageToUpdate @@ -401,6 +415,22 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) { if len(foundPodList) != tt.humioCluster.Spec.NodeCount { t.Errorf("expected list pods to return equal to %d, got %d", tt.humioCluster.Spec.NodeCount, len(foundPodList)) } + + // Test that we have the proper status + updatedHumioCluster = &corev1alpha1.HumioCluster{} + err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster) + if err != nil { + t.Errorf("get HumioCluster: (%v)", err) + } + if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateRunning { + t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateRunning, updatedHumioCluster.Status.ClusterState) + } + if updatedHumioCluster.Status.ClusterVersion != strings.Split(tt.imageToUpdate, ":")[1] { + t.Errorf("expected cluster version to be %s but got %s", strings.Split(tt.imageToUpdate, ":")[1], updatedHumioCluster.Status.ClusterVersion) + } + if updatedHumioCluster.Status.ClusterNodeCount != tt.humioCluster.Spec.NodeCount { + t.Errorf("expected node count to be %d but got %d", tt.humioCluster.Spec.NodeCount, updatedHumioCluster.Status.ClusterNodeCount) + } }) } } From 7d48ec2a5cd61d089d95ec36a08182699d42e0ed Mon Sep 17 00:00:00 2001 From: Jestin Woods Date: Fri, 27 Mar 2020 09:04:20 -0700 Subject: [PATCH 6/6] Use the humio version from the api rather than from the spec --- .../humiocluster/humiocluster_controller.go | 8 ++-- .../humiocluster_controller_test.go | 24 ++++++----- pkg/humio/client.go | 10 +++++ pkg/humio/client_mock.go | 13 +++++- pkg/humio/cluster_test.go | 40 +++++++++---------- 5 files changed, 60 insertions(+), 35 deletions(-) diff --git a/pkg/controller/humiocluster/humiocluster_controller.go b/pkg/controller/humiocluster/humiocluster_controller.go index a00514ad1..6bed8afe4 100644 --- a/pkg/controller/humiocluster/humiocluster_controller.go +++ b/pkg/controller/humiocluster/humiocluster_controller.go @@ -3,7 +3,6 @@ package humiocluster import ( "context" "fmt" - "strings" "time" "github.com/go-logr/logr" @@ -166,8 +165,11 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile. 
 	// TODO: get cluster version from humio api
 	defer func(context context.Context, humioClient humio.Client, humioCluster *corev1alpha1.HumioCluster) {
-		version := strings.Split(humioCluster.Spec.Image, ":")[1]
-		r.setClusterVersion(context, version, humioCluster)
+		status, err := humioClient.Status()
+		if err != nil {
+			r.logger.Info(fmt.Sprintf("unable to get status: %s", err))
+		}
+		r.setClusterVersion(context, status.Version, humioCluster)
 	}(context.TODO(), r.humioClient, humioCluster)

 	result, err = r.ensurePodsExist(context.TODO(), humioCluster)
diff --git a/pkg/controller/humiocluster/humiocluster_controller_test.go b/pkg/controller/humiocluster/humiocluster_controller_test.go
index 32938c90b..d6e4af981 100644
--- a/pkg/controller/humiocluster/humiocluster_controller_test.go
+++ b/pkg/controller/humiocluster/humiocluster_controller_test.go
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"net/http"
 	"net/http/httptest"
-	"strings"
 	"testing"
 	"time"

@@ -31,6 +30,7 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) {
 		name         string
 		humioCluster *corev1alpha1.HumioCluster
 		humioClient  *humio.MockClientConfig
+		version      string
 	}{
 		{
 			"test simple cluster reconciliation",
@@ -52,7 +52,8 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) {
 				Nodes:             buildClusterNodesList(3),
 				StoragePartitions: buildStoragePartitionsList(3, 1),
 				IngestPartitions:  buildIngestPartitionsList(3, 1),
-			}, nil, nil, nil, ""),
+			}, nil, nil, nil, "", "1.9.2--build-12365--sha-bf4188482a"),
+			"1.9.2--build-12365--sha-bf4188482a",
 		},
 		{
 			"test large cluster reconciliation",
@@ -74,7 +75,8 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) {
 				Nodes:             buildClusterNodesList(18),
 				StoragePartitions: buildStoragePartitionsList(72, 2),
 				IngestPartitions:  buildIngestPartitionsList(72, 2),
-			}, nil, nil, nil, ""),
+			}, nil, nil, nil, "", "1.9.2--build-12365--sha-bf4188482a"),
+			"1.9.2--build-12365--sha-bf4188482a",
 		},
 	}
 	for _, tt := range tests {
@@ -178,8 +180,8 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) {
 		if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateRunning {
 			t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateRunning, updatedHumioCluster.Status.ClusterState)
 		}
-		if updatedHumioCluster.Status.ClusterVersion != strings.Split(tt.humioCluster.Spec.Image, ":")[1] {
-			t.Errorf("expected cluster version to be %s but got %s", strings.Split(tt.humioCluster.Spec.Image, ":")[1], updatedHumioCluster.Status.ClusterVersion)
+		if updatedHumioCluster.Status.ClusterVersion != tt.version {
+			t.Errorf("expected cluster version to be %s but got %s", tt.version, updatedHumioCluster.Status.ClusterVersion)
 		}
 		if updatedHumioCluster.Status.ClusterNodeCount != tt.humioCluster.Spec.NodeCount {
 			t.Errorf("expected node count to be %d but got %d", tt.humioCluster.Spec.NodeCount, updatedHumioCluster.Status.ClusterNodeCount)
@@ -259,6 +261,7 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) {
 		humioCluster  *corev1alpha1.HumioCluster
 		humioClient   *humio.MockClientConfig
 		imageToUpdate string
+		version       string
 	}{
 		{
 			"test simple cluster humio image update",
@@ -280,8 +283,9 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) {
 				Nodes:             buildClusterNodesList(3),
 				StoragePartitions: buildStoragePartitionsList(3, 1),
 				IngestPartitions:  buildIngestPartitionsList(3, 1),
-			}, nil, nil, nil, ""),
+			}, nil, nil, nil, "", "1.9.2--build-12365--sha-bf4188482a"),
 			"humio/humio-core:1.9.2",
+			"1.9.2--build-12365--sha-bf4188482a",
 		},
 	}
 	for _, tt := range tests {
@@ -367,8 +371,8 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) {
 		if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateRunning {
 			t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateRunning, updatedHumioCluster.Status.ClusterState)
 		}
-		if updatedHumioCluster.Status.ClusterVersion != strings.Split(tt.humioCluster.Spec.Image, ":")[1] {
-			t.Errorf("expected cluster version to be %s but got %s", strings.Split(tt.humioCluster.Spec.Image, ":")[1], updatedHumioCluster.Status.ClusterVersion)
+		if updatedHumioCluster.Status.ClusterVersion != tt.version {
+			t.Errorf("expected cluster version to be %s but got %s", tt.version, updatedHumioCluster.Status.ClusterVersion)
 		}
 		if updatedHumioCluster.Status.ClusterNodeCount != tt.humioCluster.Spec.NodeCount {
 			t.Errorf("expected node count to be %d but got %d", tt.humioCluster.Spec.NodeCount, updatedHumioCluster.Status.ClusterNodeCount)
@@ -425,8 +429,8 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) {
 		if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateRunning {
 			t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateRunning, updatedHumioCluster.Status.ClusterState)
 		}
-		if updatedHumioCluster.Status.ClusterVersion != strings.Split(tt.imageToUpdate, ":")[1] {
-			t.Errorf("expected cluster version to be %s but got %s", strings.Split(tt.imageToUpdate, ":")[1], updatedHumioCluster.Status.ClusterVersion)
+		if updatedHumioCluster.Status.ClusterVersion != tt.version {
+			t.Errorf("expected cluster version to be %s but got %s", tt.version, updatedHumioCluster.Status.ClusterVersion)
 		}
 		if updatedHumioCluster.Status.ClusterNodeCount != tt.humioCluster.Spec.NodeCount {
 			t.Errorf("expected node count to be %d but got %d", tt.humioCluster.Spec.NodeCount, updatedHumioCluster.Status.ClusterNodeCount)
diff --git a/pkg/humio/client.go b/pkg/humio/client.go
index bd728d081..73ad9af91 100644
--- a/pkg/humio/client.go
+++ b/pkg/humio/client.go
@@ -22,6 +22,7 @@ type Client interface {
 	ApiToken() (string, error)
 	Authenticate(*humioapi.Config) error
 	GetBaseURL(*corev1alpha1.HumioCluster) string
+	Status() (humioapi.StatusResponse, error)
 }

 // ClientConfig stores our Humio api client
@@ -56,6 +57,16 @@ func (h *ClientConfig) Authenticate(config *humioapi.Config) error {
 	return nil
 }

+// Status returns the status of the humio cluster
+func (h *ClientConfig) Status() (humioapi.StatusResponse, error) {
+	status, err := h.apiClient.Status()
+	if err != nil {
+		log.Error(fmt.Sprintf("could not get status: %v", err))
+		return humioapi.StatusResponse{}, err
+	}
+	return *status, nil
+}
+
 // GetClusters returns a humio cluster and can be mocked via the Client interface
 func (h *ClientConfig) GetClusters() (humioapi.Cluster, error) {
 	clusters, err := h.apiClient.Clusters().Get()
diff --git a/pkg/humio/client_mock.go b/pkg/humio/client_mock.go
index a8362d0fa..b133103e8 100644
--- a/pkg/humio/client_mock.go
+++ b/pkg/humio/client_mock.go
@@ -15,9 +15,10 @@ type ClientMock struct {

 type MockClientConfig struct {
 	apiClient *ClientMock
 	Url       string
+	Version   string
 }

-func NewMocklient(cluster humioapi.Cluster, clusterError error, updateStoragePartitionSchemeError error, updateIngestPartitionSchemeError error, url string) *MockClientConfig {
+func NewMocklient(cluster humioapi.Cluster, clusterError error, updateStoragePartitionSchemeError error, updateIngestPartitionSchemeError error, url string, version string) *MockClientConfig {
 	storagePartition := humioapi.StoragePartition{}
ingestPartition := humioapi.IngestPartition{} @@ -28,7 +29,8 @@ func NewMocklient(cluster humioapi.Cluster, clusterError error, updateStoragePar UpdateStoragePartitionSchemeError: updateStoragePartitionSchemeError, UpdateIngestPartitionSchemeError: updateIngestPartitionSchemeError, }, - Url: url, + Url: url, + Version: version, } cluster.StoragePartitions = []humioapi.StoragePartition{storagePartition} @@ -41,6 +43,13 @@ func (h *MockClientConfig) Authenticate(config *humioapi.Config) error { return nil } +func (h *MockClientConfig) Status() (humioapi.StatusResponse, error) { + return humioapi.StatusResponse{ + Status: "OK", + Version: h.Version, + }, nil +} + func (h *MockClientConfig) GetClusters() (humioapi.Cluster, error) { if h.apiClient.ClusterError != nil { return humioapi.Cluster{}, h.apiClient.ClusterError diff --git a/pkg/humio/cluster_test.go b/pkg/humio/cluster_test.go index 0161e0fa6..52c565383 100644 --- a/pkg/humio/cluster_test.go +++ b/pkg/humio/cluster_test.go @@ -24,7 +24,7 @@ func TestClusterController_AreAllRegisteredNodesAvailable(t *testing.T) { humioapi.Cluster{ Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{ IsAvailable: true, - }}}, nil, nil, nil, ""), + }}}, nil, nil, nil, "", ""), }, true, false, @@ -35,7 +35,7 @@ func TestClusterController_AreAllRegisteredNodesAvailable(t *testing.T) { humioapi.Cluster{ Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{ IsAvailable: false, - }}}, nil, nil, nil, ""), + }}}, nil, nil, nil, "", ""), }, false, false, @@ -72,7 +72,7 @@ func TestClusterController_NoDataMissing(t *testing.T) { fields{NewMocklient( humioapi.Cluster{ MissingSegmentSize: 0, - }, nil, nil, nil, ""), + }, nil, nil, nil, "", ""), }, true, false, @@ -82,7 +82,7 @@ func TestClusterController_NoDataMissing(t *testing.T) { fields{NewMocklient( humioapi.Cluster{ MissingSegmentSize: 1, - }, nil, nil, nil, ""), + }, nil, nil, nil, "", ""), }, false, false, @@ -124,7 +124,7 @@ func TestClusterController_IsNodeRegistered(t *testing.T) { humioapi.Cluster{ Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{ Id: 1, - }}}, nil, nil, nil, ""), + }}}, nil, nil, nil, "", ""), }, args{ nodeID: 1, @@ -138,7 +138,7 @@ func TestClusterController_IsNodeRegistered(t *testing.T) { humioapi.Cluster{ Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{ Id: 2, - }}}, nil, nil, nil, ""), + }}}, nil, nil, nil, "", ""), }, args{ nodeID: 1, @@ -177,7 +177,7 @@ func TestClusterController_CountNodesRegistered(t *testing.T) { "test count registered nodes", fields{NewMocklient( humioapi.Cluster{ - Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{}}}, nil, nil, nil, ""), + Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{}}}, nil, nil, nil, "", ""), }, 1, false, @@ -185,7 +185,7 @@ func TestClusterController_CountNodesRegistered(t *testing.T) { { "test count no registered nodes", fields{NewMocklient( - humioapi.Cluster{}, nil, nil, nil, ""), + humioapi.Cluster{}, nil, nil, nil, "", ""), }, 0, false, @@ -228,7 +228,7 @@ func TestClusterController_CanBeSafelyUnregistered(t *testing.T) { Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{ Id: 1, CanBeSafelyUnregistered: true, - }}}, nil, nil, nil, ""), + }}}, nil, nil, nil, "", ""), }, args{ podID: 1, @@ -243,7 +243,7 @@ func TestClusterController_CanBeSafelyUnregistered(t *testing.T) { Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{ Id: 1, CanBeSafelyUnregistered: false, - }}}, nil, nil, nil, ""), + }}}, nil, nil, nil, "", ""), }, args{ podID: 1, @@ -310,7 +310,7 @@ func TestClusterController_IsStoragePartitionsBalanced(t 
*testing.T) { humioapi.ClusterNode{ Id: 2, }, - }}, nil, nil, nil, ""), + }}, nil, nil, nil, "", ""), }, args{ &corev1alpha1.HumioCluster{ @@ -350,7 +350,7 @@ func TestClusterController_IsStoragePartitionsBalanced(t *testing.T) { humioapi.ClusterNode{ Id: 2, }, - }}, nil, nil, nil, ""), + }}, nil, nil, nil, "", ""), }, args{ &corev1alpha1.HumioCluster{ @@ -390,7 +390,7 @@ func TestClusterController_IsStoragePartitionsBalanced(t *testing.T) { humioapi.ClusterNode{ Id: 2, }, - }}, nil, nil, nil, ""), + }}, nil, nil, nil, "", ""), }, args{ &corev1alpha1.HumioCluster{ @@ -430,7 +430,7 @@ func TestClusterController_IsStoragePartitionsBalanced(t *testing.T) { humioapi.ClusterNode{ Id: 2, }, - }}, nil, nil, nil, ""), + }}, nil, nil, nil, "", ""), }, args{ &corev1alpha1.HumioCluster{ @@ -502,7 +502,7 @@ func TestClusterController_RebalanceStoragePartitions(t *testing.T) { humioapi.ClusterNode{ Id: 2, }, - }}, nil, nil, nil, ""), + }}, nil, nil, nil, "", ""), &[]humioapi.StoragePartition{ humioapi.StoragePartition{ Id: 0, @@ -594,7 +594,7 @@ func TestClusterController_AreIngestPartitionsBalanced(t *testing.T) { humioapi.ClusterNode{ Id: 2, }, - }}, nil, nil, nil, ""), + }}, nil, nil, nil, "", ""), }, args{ &corev1alpha1.HumioCluster{ @@ -634,7 +634,7 @@ func TestClusterController_AreIngestPartitionsBalanced(t *testing.T) { humioapi.ClusterNode{ Id: 2, }, - }}, nil, nil, nil, ""), + }}, nil, nil, nil, "", ""), }, args{ &corev1alpha1.HumioCluster{ @@ -674,7 +674,7 @@ func TestClusterController_AreIngestPartitionsBalanced(t *testing.T) { humioapi.ClusterNode{ Id: 2, }, - }}, nil, nil, nil, ""), + }}, nil, nil, nil, "", ""), }, args{ &corev1alpha1.HumioCluster{ @@ -714,7 +714,7 @@ func TestClusterController_AreIngestPartitionsBalanced(t *testing.T) { humioapi.ClusterNode{ Id: 2, }, - }}, nil, nil, nil, ""), + }}, nil, nil, nil, "", ""), }, args{ &corev1alpha1.HumioCluster{ @@ -786,7 +786,7 @@ func TestClusterController_RebalanceIngestPartitions(t *testing.T) { humioapi.ClusterNode{ Id: 2, }, - }}, nil, nil, nil, ""), + }}, nil, nil, nil, "", ""), &[]humioapi.IngestPartition{ humioapi.IngestPartition{ Id: 0,
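
Taken together, the series leaves the controller with a small state machine: an unset ClusterState is treated as Bootstrapping, ensurePodsBootstrapped creates one pod per reconcile pass and requeues until every pod reports ready (the tests above assert exactly one new pod per Reconcile call), and only then does the status move to Running, after which ensurePodsExist adds any missing pods without the serialized readiness gate. Below is a minimal, self-contained Go model of that flow, intended purely as an illustration: the cluster and reconcile names are stand-ins rather than the operator's controller-runtime types, and the returned bool stands in for reconcile.Result{Requeue: true}.

package main

import "fmt"

const (
	stateBootstrapping = "Bootstrapping"
	stateRunning       = "Running"
)

// cluster is a stand-in for HumioCluster: desired node count, status state,
// and a readiness flag per created pod.
type cluster struct {
	nodeCount int
	state     string
	podsReady []bool
}

// reconcile models one pass of the controller. During Bootstrapping it
// refuses to create a new pod while any existing pod is unready, creates at
// most one pod per pass, and flips the state to Running only when all
// nodeCount pods exist and are ready.
func reconcile(c *cluster) (requeue bool) {
	if c.state == "" {
		c.state = stateBootstrapping
	}
	if c.state == stateBootstrapping {
		for _, ready := range c.podsReady {
			if !ready {
				return true // wait: all pods must report ready first
			}
		}
		if len(c.podsReady) < c.nodeCount {
			c.podsReady = append(c.podsReady, false) // create one pod
			return true
		}
		c.state = stateRunning
	}
	return false
}

func main() {
	c := &cluster{nodeCount: 3}
	for pass := 1; reconcile(c); pass++ {
		// simulate the newest pod becoming ready between passes
		for i := range c.podsReady {
			c.podsReady[i] = true
		}
		fmt.Printf("pass %d: state=%s pods=%d\n", pass, c.state, len(c.podsReady))
	}
	fmt.Printf("final: state=%s pods=%d\n", c.state, len(c.podsReady))
}

Running the model prints three Bootstrapping passes, one pod created per pass, before the state settles at Running with three pods, which is the same ordering the tests assert through the Bootstrapping and Running checks on updatedHumioCluster.Status.ClusterState.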