From 1bb52e9bcdb19f0b2e88d156a565ae6aabb4403a Mon Sep 17 00:00:00 2001
From: Jestin Woods
Date: Thu, 26 Mar 2020 12:17:07 -0700
Subject: [PATCH 1/4] Fix cluster state issues and pod creation

---
 .../humiocluster/humiocluster_controller.go | 49 +++++++++++--
 .../humiocluster_controller_test.go         | 72 +++++++++++++++----
 pkg/controller/humiocluster/pods.go         |  4 +-
 3 files changed, 103 insertions(+), 22 deletions(-)

diff --git a/pkg/controller/humiocluster/humiocluster_controller.go b/pkg/controller/humiocluster/humiocluster_controller.go
index 116951d2b..d57532586 100644
--- a/pkg/controller/humiocluster/humiocluster_controller.go
+++ b/pkg/controller/humiocluster/humiocluster_controller.go
@@ -131,7 +131,7 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile.
 	// Assume we are bootstrapping if no cluster state is set.
 	// TODO: this is a workaround for the issue where humio pods cannot start up at the same time during the first boot
 	if humioCluster.Status.ClusterState == "" {
-		r.setClusterStatus(context.TODO(), "Boostrapping", humioCluster)
+		r.setClusterStatus(context.TODO(), "Bootstrapping", humioCluster)
 	}
 
 	// Ensure developer password is a k8s secret
@@ -149,13 +149,20 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile.
 	}
 
 	// Ensure pods exist. Will requeue if not all pods are created and ready
+	if humioCluster.Status.ClusterState == "Bootstrapping" {
+		result, err = r.ensurePodsBootstrapped(context.TODO(), humioCluster)
+		if result != emptyResult || err != nil {
+			return result, err
+		}
+	}
+
+	r.setClusterStatus(context.TODO(), "Running", humioCluster)
+
 	result, err = r.ensurePodsExist(context.TODO(), humioCluster)
 	if result != emptyResult || err != nil {
 		return result, err
 	}
 
-	r.setClusterStatus(context.TODO(), "Running", humioCluster)
-
 	// Ensure service exists
 	err = r.ensureServiceExists(context.TODO(), humioCluster)
 	if err != nil {
@@ -173,6 +180,7 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile.
 		return reconcile.Result{}, err
 	}
 
+	// TODO: wait until all pods are ready before continuing
 	clusterController := humio.NewClusterController(r.humioClient)
 	err = r.ensurePartitionsAreBalanced(*clusterController, humioCluster)
 	if err != nil {
@@ -187,7 +195,7 @@
 // TODO: we use this to determine if we should have a delay between startup of humio pods during bootstrap vs starting up pods during an image update
 func (r *ReconcileHumioCluster) setClusterStatus(context context.Context, clusterState string, humioCluster *corev1alpha1.HumioCluster) error {
 	humioCluster.Status.ClusterState = clusterState
-	return r.client.Update(context, humioCluster)
+	return r.client.Status().Update(context, humioCluster)
 }
 
 func (r *ReconcileHumioCluster) ensurePodLabels(context context.Context, hc *corev1alpha1.HumioCluster) error {
@@ -315,7 +323,7 @@ func (r *ReconcileHumioCluster) ensureMismatchedPodVersionsAreDeleted(conetext c
 // TODO: change to create 1 pod at a time, return Requeue=true and RequeueAfter.
 // check that other pods, if they exist, are in a ready state
-func (r *ReconcileHumioCluster) ensurePodsExist(conetext context.Context, humioCluster *corev1alpha1.HumioCluster) (reconcile.Result, error) {
+func (r *ReconcileHumioCluster) ensurePodsBootstrapped(conetext context.Context, humioCluster *corev1alpha1.HumioCluster) (reconcile.Result, error) {
 	// Ensure we have pods for the defined NodeCount.
 	// If scaling down, we will handle the extra/obsolete pods later.
 	foundPodList, err := ListPods(r.client, humioCluster)
@@ -341,7 +349,7 @@ func (r *ReconcileHumioCluster) ensurePodsExist(conetext context.Context, humioC
 		return reconcile.Result{}, nil
 	}
 
-	if podsNotReadyCount > 0 && humioCluster.Status.ClusterState == "Bootstrapping" {
+	if podsNotReadyCount > 0 {
 		r.logger.Info(fmt.Sprintf("there are %d humio pods that are not ready. all humio pods must report ready before reconciliation can continue", podsNotReadyCount))
 		return reconcile.Result{Requeue: true, RequeueAfter: time.Second * 5}, nil
 	}
@@ -367,6 +375,35 @@
 	return reconcile.Result{}, nil
 }
 
+func (r *ReconcileHumioCluster) ensurePodsExist(conetext context.Context, humioCluster *corev1alpha1.HumioCluster) (reconcile.Result, error) {
+	// Ensure we have pods for the defined NodeCount.
+	// If scaling down, we will handle the extra/obsolete pods later.
+	foundPodList, err := ListPods(r.client, humioCluster)
+	if err != nil {
+		return reconcile.Result{}, fmt.Errorf("failed to list pods: %s", err)
+	}
+
+	if len(foundPodList) < humioCluster.Spec.NodeCount {
+		pod, err := r.constructPod(humioCluster)
+		if err != nil {
+			return reconcile.Result{}, fmt.Errorf("unable to construct pod for HumioCluster: %v", err)
+		}
+
+		err = r.client.Create(context.TODO(), pod)
+		if err != nil {
+			log.Info(fmt.Sprintf("unable to create pod: %v", err))
+			return reconcile.Result{Requeue: true, RequeueAfter: time.Second * 5}, fmt.Errorf("unable to create Pod for HumioCluster: %v", err)
+		}
+		log.Info(fmt.Sprintf("successfully created pod %s for HumioCluster %s", pod.Name, humioCluster.Name))
+		metricPodsCreated.Inc()
+		// We have created a pod. Requeue immediately even if the pod is not ready. We will check the readiness status on the next reconciliation.
+		return reconcile.Result{Requeue: true}, nil
+	}
+
+	// TODO: what should happen if we have more pods than are expected?
+	return reconcile.Result{}, nil
+}
+
 // TODO: extend this (or create separate method) to take this password and perform a login, get the jwt token and then call the api to get the persistent api token and also store that as a secret
 // this functionality should perhaps go into humio.cluster_auth.go
 func (r *ReconcileHumioCluster) ensureDeveloperUserPasswordExists(conetext context.Context, humioCluster *corev1alpha1.HumioCluster) error {
diff --git a/pkg/controller/humiocluster/humiocluster_controller_test.go b/pkg/controller/humiocluster/humiocluster_controller_test.go
index 8d317e03a..c963eac12 100644
--- a/pkg/controller/humiocluster/humiocluster_controller_test.go
+++ b/pkg/controller/humiocluster/humiocluster_controller_test.go
@@ -120,8 +120,17 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) {
 		t.Errorf("reconcile: (%v)", err)
 	}
 
+	updatedHumioCluster := &humioClusterv1alpha1.HumioCluster{}
+	err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster)
+	if err != nil {
+		t.Errorf("get HumioCluster: (%v)", err)
+	}
+	if updatedHumioCluster.Status.ClusterState != "Bootstrapping" {
+		t.Errorf("expected cluster state to be %s but got %s", "Bootstrapping", updatedHumioCluster.Status.ClusterState)
+	}
+
 	// Check that the developer password exists as a k8s secret
-	secret, err := r.GetSecret(context.TODO(), tt.humioCluster, serviceAccountSecretName)
+	secret, err := r.GetSecret(context.TODO(), updatedHumioCluster, serviceAccountSecretName)
 	if err != nil {
 		t.Errorf("get secret with password: (%v). %+v", err, secret)
 	}
@@ -129,10 +138,10 @@
 		t.Errorf("password secret %s expected content to not be empty, but it was", serviceAccountSecretName)
 	}
 
-	for nodeCount := 0; nodeCount < tt.humioCluster.Spec.NodeCount; nodeCount++ {
-		foundPodList, err := ListPods(cl, tt.humioCluster)
-		if len(foundPodList) != nodeCount+1 {
-			t.Errorf("expected list pods to return equal to %d, got %d", nodeCount+1, len(foundPodList))
+	for nodeCount := 1; nodeCount <= tt.humioCluster.Spec.NodeCount; nodeCount++ {
+		foundPodList, err := ListPods(cl, updatedHumioCluster)
+		if len(foundPodList) != nodeCount {
+			t.Errorf("expected list pods to return equal to %d, got %d", nodeCount, len(foundPodList))
 		}
 
 		// We must update the IP address because when we attempt to add labels to the pod we validate that they have IP addresses first
@@ -147,18 +156,36 @@
 		if err != nil {
 			t.Errorf("reconcile: (%v)", err)
 		}
+	}
+
+	// Check that we do not create more than expected number of humio pods
+	res, err = r.Reconcile(req)
+	if err != nil {
+		t.Errorf("reconcile: (%v)", err)
+	}
+	foundPodList, err := ListPods(cl, updatedHumioCluster)
+	if len(foundPodList) != tt.humioCluster.Spec.NodeCount {
+		t.Errorf("expected list pods to return equal to %d, got %d", tt.humioCluster.Spec.NodeCount, len(foundPodList))
+	}
+
+	updatedHumioCluster = &humioClusterv1alpha1.HumioCluster{}
+	err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster)
+	if err != nil {
+		t.Errorf("get HumioCluster: (%v)", err)
+	}
+	if updatedHumioCluster.Status.ClusterState != "Running" {
+		t.Errorf("expected cluster state to be %s but got %s", "Running", updatedHumioCluster.Status.ClusterState)
 	}
 
 	// Check that the service exists
-	service, err := r.GetService(context.TODO(), tt.humioCluster)
+	service, err := r.GetService(context.TODO(), updatedHumioCluster)
 	if err != nil {
 		t.Errorf("get service: (%v). %+v", err, service)
 	}
 
 	// Check that the persistent token exists as a k8s secret
-	token, err := r.GetSecret(context.TODO(), tt.humioCluster, serviceTokenSecretName)
+	token, err := r.GetSecret(context.TODO(), updatedHumioCluster, serviceTokenSecretName)
 	if err != nil {
 		t.Errorf("get secret with api token: (%v). %+v", err, token)
 	}
@@ -181,8 +208,7 @@
 		t.Errorf("expected api token in use to be \"%+v\", but got \"%+v\"", "mocktoken", tokenInUse)
 	}
 
-	// Get the updated HumioCluster object.
-	updatedHumioCluster := &humioClusterv1alpha1.HumioCluster{}
+	// Get the updated HumioCluster to update it with the partitions
 	err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster)
 	if err != nil {
 		t.Errorf("get HumioCluster: (%v)", err)
 	}
@@ -197,7 +223,7 @@
 		t.Errorf("expected ingest partitions to be balanced. got %v, err %s", b, err)
 	}
 
-	foundPodList, err := ListPods(cl, tt.humioCluster)
+	foundPodList, err = ListPods(cl, updatedHumioCluster)
 	if err != nil {
 		t.Errorf("could not list pods to validate their content: %v", err)
 	}
@@ -294,6 +320,16 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) {
 		t.Errorf("reconcile: (%v)", err)
 	}
 
+	updatedHumioCluster := &humioClusterv1alpha1.HumioCluster{}
+	err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster)
+	if err != nil {
+		t.Errorf("get HumioCluster: (%v)", err)
+	}
+	if updatedHumioCluster.Status.ClusterState != "Bootstrapping" {
+		t.Errorf("expected cluster state to be %s but got %s", "Bootstrapping", updatedHumioCluster.Status.ClusterState)
+	}
+	tt.humioCluster = updatedHumioCluster
+
 	for nodeCount := 0; nodeCount < tt.humioCluster.Spec.NodeCount; nodeCount++ {
 		foundPodList, err := ListPods(cl, tt.humioCluster)
 		if len(foundPodList) != nodeCount+1 {
 			t.Errorf("expected list pods to return equal to %d, got %d", nodeCount+1, len(foundPodList))
@@ -314,9 +350,19 @@
 		}
 	}
 
+	// Test that we're in a Running state
+	updatedHumioCluster = &humioClusterv1alpha1.HumioCluster{}
+	err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster)
+	if err != nil {
+		t.Errorf("get HumioCluster: (%v)", err)
+	}
+	if updatedHumioCluster.Status.ClusterState != "Running" {
+		t.Errorf("expected cluster state to be %s but got %s", "Running", updatedHumioCluster.Status.ClusterState)
+	}
+
 	// Update humio image
-	tt.humioCluster.Spec.Image = tt.imageToUpdate
-	cl.Update(context.TODO(), tt.humioCluster)
+	updatedHumioCluster.Spec.Image = tt.imageToUpdate
+	cl.Update(context.TODO(), updatedHumioCluster)
 
 	for nodeCount := 0; nodeCount < tt.humioCluster.Spec.NodeCount; nodeCount++ {
 		res, err := r.Reconcile(req)
@@ -348,7 +394,7 @@
 		}
 	}
 
-	foundPodList, err = ListPods(cl, tt.humioCluster)
+	foundPodList, err = ListPods(cl, updatedHumioCluster)
 	if err != nil {
 		t.Errorf("failed to list pods: %s", err)
 	}
diff --git a/pkg/controller/humiocluster/pods.go b/pkg/controller/humiocluster/pods.go
index 8a338aa68..ffb0d9afb 100644
--- a/pkg/controller/humiocluster/pods.go
+++ b/pkg/controller/humiocluster/pods.go
@@ -130,9 +130,7 @@ func labelsForPod(clusterName string, nodeID int) map[string]string {
 func generatePodSuffix() string {
 	rand.Seed(time.Now().UnixNano())
-	chars := []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
-		"abcdefghijklmnopqrstuvwxyz" +
-		"0123456789")
+	chars := []rune("abcdefghijklmnopqrstuvwxyz")
 	length := 6
 	var b strings.Builder
 	for i := 0; i < length; i++ {
From 69f1cbeb63b4b7b463454bf9aeeb581d80141bce Mon Sep 17 00:00:00 2001
From: Jestin Woods
Date: Thu, 26 Mar 2020 15:58:07 -0700
Subject: [PATCH 2/4] Make cluster state a variable in the spec

---
 pkg/apis/core/v1alpha1/humiocluster_types.go |  7 +++
 .../humiocluster/humiocluster_controller.go  |  6 +--
 .../humiocluster_controller_test.go          | 46 +++++++++----------
 3 files changed, 33 insertions(+), 26 deletions(-)

diff --git a/pkg/apis/core/v1alpha1/humiocluster_types.go b/pkg/apis/core/v1alpha1/humiocluster_types.go
index c44f3cb53..39540a7da 100644
--- a/pkg/apis/core/v1alpha1/humiocluster_types.go
+++ b/pkg/apis/core/v1alpha1/humiocluster_types.go
@@ -5,6 +5,13 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+const (
+	// HumioClusterStateBootstrapping is the Bootstrapping state of the cluster
+	HumioClusterStateBootstrapping = "Bootstrapping"
+	// HumioClusterStateRunning is the Running state of the cluster
+	HumioClusterStateRunning = "Running"
+)
+
 // HumioClusterSpec defines the desired state of HumioCluster
 type HumioClusterSpec struct {
 	// Desired container image including the image tag
diff --git a/pkg/controller/humiocluster/humiocluster_controller.go b/pkg/controller/humiocluster/humiocluster_controller.go
index d57532586..ffa141010 100644
--- a/pkg/controller/humiocluster/humiocluster_controller.go
+++ b/pkg/controller/humiocluster/humiocluster_controller.go
@@ -131,7 +131,7 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile.
 	// Assume we are bootstrapping if no cluster state is set.
 	// TODO: this is a workaround for the issue where humio pods cannot start up at the same time during the first boot
 	if humioCluster.Status.ClusterState == "" {
-		r.setClusterStatus(context.TODO(), "Bootstrapping", humioCluster)
+		r.setClusterStatus(context.TODO(), corev1alpha1.HumioClusterStateBootstrapping, humioCluster)
 	}
 
 	// Ensure developer password is a k8s secret
@@ -149,14 +149,14 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile.
 	}
 
 	// Ensure pods exist. Will requeue if not all pods are created and ready
-	if humioCluster.Status.ClusterState == "Bootstrapping" {
+	if humioCluster.Status.ClusterState == corev1alpha1.HumioClusterStateBootstrapping {
 		result, err = r.ensurePodsBootstrapped(context.TODO(), humioCluster)
 		if result != emptyResult || err != nil {
 			return result, err
 		}
 	}
 
-	r.setClusterStatus(context.TODO(), "Running", humioCluster)
+	r.setClusterStatus(context.TODO(), corev1alpha1.HumioClusterStateRunning, humioCluster)
 
 	result, err = r.ensurePodsExist(context.TODO(), humioCluster)
 	if result != emptyResult || err != nil {
diff --git a/pkg/controller/humiocluster/humiocluster_controller_test.go b/pkg/controller/humiocluster/humiocluster_controller_test.go
index c963eac12..b23415169 100644
--- a/pkg/controller/humiocluster/humiocluster_controller_test.go
+++ b/pkg/controller/humiocluster/humiocluster_controller_test.go
@@ -9,7 +9,7 @@ import (
 	"time"
 
 	humioapi "github.com/humio/cli/api"
-	humioClusterv1alpha1 "github.com/humio/humio-operator/pkg/apis/core/v1alpha1"
+	corev1alpha1 "github.com/humio/humio-operator/pkg/apis/core/v1alpha1"
 	"github.com/humio/humio-operator/pkg/humio"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -28,17 +28,17 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) {
 
 	tests := []struct {
 		name         string
-		humioCluster *humioClusterv1alpha1.HumioCluster
+		humioCluster *corev1alpha1.HumioCluster
 		humioClient  *humio.MockClientConfig
 	}{
 		{
 			"test simple cluster reconciliation",
-			&humioClusterv1alpha1.HumioCluster{
+			&corev1alpha1.HumioCluster{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "humiocluster",
 					Namespace: "logging",
 				},
-				Spec: humioClusterv1alpha1.HumioClusterSpec{
+				Spec: corev1alpha1.HumioClusterSpec{
 					Image:                   "humio/humio-core:1.9.1",
 					TargetReplicationFactor: 2,
 					StoragePartitionsCount:  3,
@@ -55,12 +55,12 @@
 		},
 		{
 			"test large cluster reconciliation",
-			&humioClusterv1alpha1.HumioCluster{
+			&corev1alpha1.HumioCluster{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "humiocluster",
 					Namespace: "logging",
 				},
-				Spec: humioClusterv1alpha1.HumioClusterSpec{
+				Spec: corev1alpha1.HumioClusterSpec{
 					Image:                   "humio/humio-core:1.9.1",
 					TargetReplicationFactor: 3,
 					StoragePartitionsCount:  72,
@@ -86,7 +86,7 @@
 
 	// Register operator types with the runtime scheme.
 	s := scheme.Scheme
-	s.AddKnownTypes(humioClusterv1alpha1.SchemeGroupVersion, tt.humioCluster)
+	s.AddKnownTypes(corev1alpha1.SchemeGroupVersion, tt.humioCluster)
 
 	// Create a fake client to mock API calls.
 	cl := fake.NewFakeClient(objs...)
@@ -120,13 +120,13 @@ func TestReconcileHumioCluster_Reconcile(t *testing.T) {
 		t.Errorf("reconcile: (%v)", err)
 	}
 
-	updatedHumioCluster := &humioClusterv1alpha1.HumioCluster{}
+	updatedHumioCluster := &corev1alpha1.HumioCluster{}
 	err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster)
 	if err != nil {
 		t.Errorf("get HumioCluster: (%v)", err)
 	}
-	if updatedHumioCluster.Status.ClusterState != "Bootstrapping" {
-		t.Errorf("expected cluster state to be %s but got %s", "Bootstrapping", updatedHumioCluster.Status.ClusterState)
+	if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateBootstrapping {
+		t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateBootstrapping, updatedHumioCluster.Status.ClusterState)
 	}
 
 	// Check that the developer password exists as a k8s secret
@@ -168,13 +168,13 @@
 		t.Errorf("expected list pods to return equal to %d, got %d", tt.humioCluster.Spec.NodeCount, len(foundPodList))
 	}
 
-	updatedHumioCluster = &humioClusterv1alpha1.HumioCluster{}
+	updatedHumioCluster = &corev1alpha1.HumioCluster{}
 	err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster)
 	if err != nil {
 		t.Errorf("get HumioCluster: (%v)", err)
 	}
-	if updatedHumioCluster.Status.ClusterState != "Running" {
-		t.Errorf("expected cluster state to be %s but got %s", "Running", updatedHumioCluster.Status.ClusterState)
+	if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateRunning {
+		t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateRunning, updatedHumioCluster.Status.ClusterState)
 	}
 
 	// Check that the service exists
@@ -248,18 +248,18 @@
 
 	tests := []struct {
 		name          string
-		humioCluster  *humioClusterv1alpha1.HumioCluster
+		humioCluster  *corev1alpha1.HumioCluster
 		humioClient   *humio.MockClientConfig
 		imageToUpdate string
 	}{
 		{
 			"test simple cluster humio image update",
-			&humioClusterv1alpha1.HumioCluster{
+			&corev1alpha1.HumioCluster{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "humiocluster",
 					Namespace: "logging",
 				},
-				Spec: humioClusterv1alpha1.HumioClusterSpec{
+				Spec: corev1alpha1.HumioClusterSpec{
 					Image:                   "humio/humio-core:1.9.1",
 					TargetReplicationFactor: 2,
 					StoragePartitionsCount:  3,
@@ -286,7 +286,7 @@
 
 	// Register operator types with the runtime scheme.
 	s := scheme.Scheme
-	s.AddKnownTypes(humioClusterv1alpha1.SchemeGroupVersion, tt.humioCluster)
+	s.AddKnownTypes(corev1alpha1.SchemeGroupVersion, tt.humioCluster)
 
 	// Create a fake client to mock API calls.
 	cl := fake.NewFakeClient(objs...)
@@ -320,13 +320,13 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) {
 		t.Errorf("reconcile: (%v)", err)
 	}
 
-	updatedHumioCluster := &humioClusterv1alpha1.HumioCluster{}
+	updatedHumioCluster := &corev1alpha1.HumioCluster{}
 	err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster)
 	if err != nil {
 		t.Errorf("get HumioCluster: (%v)", err)
 	}
-	if updatedHumioCluster.Status.ClusterState != "Bootstrapping" {
-		t.Errorf("expected cluster state to be %s but got %s", "Bootstrapping", updatedHumioCluster.Status.ClusterState)
+	if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateBootstrapping {
+		t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateBootstrapping, updatedHumioCluster.Status.ClusterState)
 	}
 	tt.humioCluster = updatedHumioCluster
 
@@ -351,13 +351,13 @@
 	}
 
 	// Test that we're in a Running state
-	updatedHumioCluster = &humioClusterv1alpha1.HumioCluster{}
+	updatedHumioCluster = &corev1alpha1.HumioCluster{}
 	err = r.client.Get(context.TODO(), req.NamespacedName, updatedHumioCluster)
 	if err != nil {
 		t.Errorf("get HumioCluster: (%v)", err)
 	}
-	if updatedHumioCluster.Status.ClusterState != "Running" {
-		t.Errorf("expected cluster state to be %s but got %s", "Running", updatedHumioCluster.Status.ClusterState)
+	if updatedHumioCluster.Status.ClusterState != corev1alpha1.HumioClusterStateRunning {
+		t.Errorf("expected cluster state to be %s but got %s", corev1alpha1.HumioClusterStateRunning, updatedHumioCluster.Status.ClusterState)
 	}
 
 	// Update humio image

From aa8cfa0fb16abc8e796361683a03775993311c8c Mon Sep 17 00:00:00 2001
From: Jestin Woods
Date: Thu, 26 Mar 2020 16:18:53 -0700
Subject: [PATCH 3/4] Add cluster state to the get output fields

---
 deploy/crds/core.humio.com_humioclusters_crd.yaml | 5 +++++
 pkg/apis/core/v1alpha1/humiocluster_types.go      | 1 +
 2 files changed, 6 insertions(+)

diff --git a/deploy/crds/core.humio.com_humioclusters_crd.yaml b/deploy/crds/core.humio.com_humioclusters_crd.yaml
index a1a276f12..7c62bc2df 100644
--- a/deploy/crds/core.humio.com_humioclusters_crd.yaml
+++ b/deploy/crds/core.humio.com_humioclusters_crd.yaml
@@ -3,6 +3,11 @@ kind: CustomResourceDefinition
 metadata:
   name: humioclusters.core.humio.com
 spec:
+  additionalPrinterColumns:
+  - JSONPath: .status.clusterState
+    description: The state of the cluster
+    name: State
+    type: string
   group: core.humio.com
   names:
     kind: HumioCluster
diff --git a/pkg/apis/core/v1alpha1/humiocluster_types.go b/pkg/apis/core/v1alpha1/humiocluster_types.go
index 39540a7da..dfe723bef 100644
--- a/pkg/apis/core/v1alpha1/humiocluster_types.go
+++ b/pkg/apis/core/v1alpha1/humiocluster_types.go
@@ -43,6 +43,7 @@ type HumioClusterStatus struct {
 // HumioCluster is the Schema for the humioclusters API
 // +kubebuilder:subresource:status
 // +kubebuilder:resource:path=humioclusters,scope=Namespaced
+// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.clusterState",description="The state of the cluster"
 type HumioCluster struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`

From ea60424e4ff2cf93607add886d231fa6dd1535d0 Mon Sep 17 00:00:00 2001
From: Jestin Woods
Date: Thu, 26 Mar 2020 17:17:37 -0700
Subject: [PATCH 4/4] Update tt.humioCluster to the updatedCluster in a couple places

---
 pkg/controller/humiocluster/humiocluster_controller_test.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/controller/humiocluster/humiocluster_controller_test.go b/pkg/controller/humiocluster/humiocluster_controller_test.go
index b23415169..bb5a1809f 100644
--- a/pkg/controller/humiocluster/humiocluster_controller_test.go
+++ b/pkg/controller/humiocluster/humiocluster_controller_test.go
@@ -331,7 +331,7 @@ func TestReconcileHumioCluster_Reconcile_update_humio_image(t *testing.T) {
 	tt.humioCluster = updatedHumioCluster
 
 	for nodeCount := 0; nodeCount < tt.humioCluster.Spec.NodeCount; nodeCount++ {
-		foundPodList, err := ListPods(cl, tt.humioCluster)
+		foundPodList, err := ListPods(cl, updatedHumioCluster)
 		if len(foundPodList) != nodeCount+1 {
 			t.Errorf("expected list pods to return equal to %d, got %d", nodeCount+1, len(foundPodList))
 		}
@@ -375,7 +375,7 @@
 	}
 
 	// Ensure all the pods are shut down to prep for the image update
-	foundPodList, err := ListPods(cl, tt.humioCluster)
+	foundPodList, err := ListPods(cl, updatedHumioCluster)
 	if err != nil {
 		t.Errorf("failed to list pods: %s", err)
 	}