diff --git a/deploy/crds/core.humio.com_humioclusters_crd.yaml b/deploy/crds/core.humio.com_humioclusters_crd.yaml index 4e2c6ebeb..1b55e042f 100644 --- a/deploy/crds/core.humio.com_humioclusters_crd.yaml +++ b/deploy/crds/core.humio.com_humioclusters_crd.yaml @@ -31,9 +31,15 @@ spec: spec: description: HumioClusterSpec defines the desired state of HumioCluster properties: + digestPartitionsCount: + description: Desired number of digest partitions + type: integer image: description: Desired container image type: string + storagePartitionsCount: + description: Desired number of storage partitions + type: integer targetReplicationFactor: description: Desired number of replicas of both storage and ingest partitions type: integer diff --git a/deploy/crds/core.humio.com_v1alpha1_humiocluster_cr.yaml b/deploy/crds/core.humio.com_v1alpha1_humiocluster_cr.yaml index 5a2168f7c..ac453db7d 100644 --- a/deploy/crds/core.humio.com_v1alpha1_humiocluster_cr.yaml +++ b/deploy/crds/core.humio.com_v1alpha1_humiocluster_cr.yaml @@ -6,3 +6,4 @@ spec: image: humio/humio-core version: "1.9.0" targetReplicationFactor: 2 + storagePartitionsCount: 24 diff --git a/go.mod b/go.mod index 2491bed29..aafbbfe80 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,27 @@ module github.com/humio/humio-operator go 1.13 require ( - github.com/humio/cli v0.24.2 // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/golang/protobuf v1.3.4 // indirect + github.com/humio/cli v0.23.1-0.20191107092311-06c8a816c799 + github.com/mattn/go-runewidth v0.0.8 // indirect + github.com/olekukonko/tablewriter v0.0.4 // indirect github.com/operator-framework/operator-sdk v0.15.1 - github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f // indirect + github.com/pelletier/go-toml v1.6.0 // indirect + github.com/prometheus/client_golang v1.2.1 + github.com/prometheus/common v0.7.0 + github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f + github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect + github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/cobra v0.0.6 // indirect github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.6.2 // indirect + golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 // indirect + golang.org/x/net v0.0.0-20200301022130-244492dfa37a // indirect + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 // indirect + gopkg.in/ini.v1 v1.52.0 // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/client-go v12.0.0+incompatible diff --git a/go.sum b/go.sum index 271420f85..754c581fa 100644 --- a/go.sum +++ b/go.sum @@ -130,6 +130,7 @@ github.com/coreos/prometheus-operator v0.34.0 h1:TF9qaydNeUamLKs0hniaapa4FBz8U8T github.com/coreos/prometheus-operator v0.34.0/go.mod h1:Li6rMllG/hYIyXfMuvUwhyC+hqwJVHdsDdP21hypT1M= github.com/coreos/rkt v1.30.0/go.mod h1:O634mlH6U7qk87poQifK6M2rsFNt+FyUTWNMnP1hF1U= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= @@ -191,6 +192,7 @@ github.com/fatih/color v1.7.0/go.mod 
h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= @@ -295,6 +297,8 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -359,6 +363,7 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/heketi/heketi v9.0.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= github.com/heketi/rest v0.0.0-20180404230133-aa6a65207413/go.mod h1:BeS3M108VzVlmAue3lv2WcGuPAX94/KN63MUURzbYSI= @@ -368,6 +373,10 @@ github.com/helm/helm-2to3 v0.2.0/go.mod h1:jQUVAWB0bM7zNIqKPIfHFzuFSK0kHYovJrjO+ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/humio/cli v0.23.0 h1:PK3cPa1/aN9Rbo1VMXoG9oSrgsFstDb/Lyxh0X/6Kz8= +github.com/humio/cli v0.23.0/go.mod h1:O8SfBh83eum4j4RpIrF9oeH01wiZZB1Jd0MnqdHBBA8= +github.com/humio/cli v0.23.1-0.20191107092311-06c8a816c799 h1:FGkOcxmvJyTOVhqo9iSyF1AsK9fYlsnrE8NVy7sHffA= +github.com/humio/cli v0.23.1-0.20191107092311-06c8a816c799/go.mod h1:9v5/6etu0lFf/PNRwvojGyIUO2V7EMBpzQcMjTFyY7g= github.com/humio/cli v0.24.2 h1:ChtryfQI36ncifNsrqYBgNLV0d02Ngo+hXNWN6WFwbU= github.com/humio/cli v0.24.2/go.mod h1:O8SfBh83eum4j4RpIrF9oeH01wiZZB1Jd0MnqdHBBA8= github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= @@ -377,6 +386,7 @@ github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/improbable-eng/thanos v0.3.2/go.mod h1:GZewVGILKuJVPNRn7L4Zw+7X96qzFOwj63b22xYGXBE= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= @@ -422,6 +432,7 @@ github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -441,6 +452,11 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0= +github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= @@ -456,10 +472,12 @@ github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nr github.com/mindprince/gonvml v0.0.0-20171110221305-fee913ce8fb2/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= @@ -483,6 +501,9 @@ github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -522,7 +543,10 @@ github.com/otiai10/mint v1.2.3/go.mod h1:YnfyPNhBvnY8bW4SGQHCs/aAFhkgySlMZbrF5U0 github.com/otiai10/mint v1.2.4/go.mod h1:d+b7n/0R3tdyUYYylALXpWQ/kTN+QobSq/4SRGBkR3M= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.0.1/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.6.0 h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4= +github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20171002181615-b8543db493a5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -580,6 +604,10 @@ github.com/rubenv/sql-migrate v0.0.0-20191025130928-9355dd04f4b3/go.mod h1:WS0rl github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= @@ -587,22 +615,35 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= 
github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f h1:tygelZueB1EtXkPI6mQ4o9DQ0+FKW41hTbunoXZCTqk= github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f/go.mod h1:AuYgA5Kyo4c7HfUmvRGs/6rGlMMV/6B1bVnB9JxJEEg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/skratchdot/open-golang v0.0.0-20190402232053-79abb63cd66e/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.6 h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs= +github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -610,7 +651,11 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod 
h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.5.0/go.mod h1:AkYRkVJF8TkSG/xet6PzXX+l39KhhXa2pdqVSxnTcn4= +github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -620,6 +665,8 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -679,6 +726,9 @@ golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191028145041-f83a4685e152 h1:ZC1Xn5A1nlpSmQCIva4bZ3ob3lmhYIefc+GU+DLg1Ow= golang.org/x/crypto v0.0.0-20191028145041-f83a4685e152/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -721,12 +771,16 @@ golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271 h1:N66aaryRB3Ax92gH0v3hp1QYZ3zWWCCUR/j8Ifh45Ss= golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -763,6 +817,8 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191028164358-195ce5e7f934 h1:u/E0NqCIWRDAo9WCFo6Ko49njPFDLSd3z+X1HgWDMpE= golang.org/x/sys v0.0.0-20191028164358-195ce5e7f934/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -811,6 +867,7 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190128161407-8ac453e89fca/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= @@ -843,6 +900,10 @@ gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.52.0 h1:j+Lt/M1oPPejkniCg1TkWE2J3Eh1oZTsHSXzMTzUXn4= +gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod 
h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= @@ -858,6 +919,8 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= diff --git a/pkg/apis/core/v1alpha1/humiocluster_types.go b/pkg/apis/core/v1alpha1/humiocluster_types.go index a7f621516..e4599b284 100644 --- a/pkg/apis/core/v1alpha1/humiocluster_types.go +++ b/pkg/apis/core/v1alpha1/humiocluster_types.go @@ -12,6 +12,12 @@ type HumioClusterSpec struct { Version string `json:"version,omitempty"` // Desired number of replicas of both storage and ingest partitions TargetReplicationFactor int `json:"targetReplicationFactor,omitempty"` + // Desired number of storage partitions + StoragePartitionsCount int `json:"storagePartitionsCount,omitempty"` + // Desired number of digest partitions + DigestPartitionsCount int `json:"digestPartitionsCount,omitempty"` + // Desired number of nodes + NodeCount int `json:"nodeCount,omitempty"` } // HumioClusterStatus defines the observed state of HumioCluster diff --git a/pkg/controller/humiocluster/defaults.go b/pkg/controller/humiocluster/defaults.go new file mode 100644 index 000000000..466017035 --- /dev/null +++ b/pkg/controller/humiocluster/defaults.go @@ -0,0 +1,40 @@ +package humiocluster + +import ( + humioClusterv1alpha1 "github.com/humio/humio-operator/pkg/apis/core/v1alpha1" +) + +const ( + name = "humiocluster" + namespace = "logging" + image = "humio/humio-core" + version = "1.9.0" + targetReplicationFactor = 2 + storagePartitionsCount = 24 + digestPartitionsCount = 24 + nodeCount = 3 +) + +func setDefaults(humioCluster *humioClusterv1alpha1.HumioCluster) { + if humioCluster.ObjectMeta.Name == "" { + humioCluster.ObjectMeta.Name = name + } + if humioCluster.ObjectMeta.Namespace == "" { + humioCluster.ObjectMeta.Namespace = namespace + } + if humioCluster.Spec.Image == "" { + humioCluster.Spec.Image = image + } + if humioCluster.Spec.TargetReplicationFactor == 0 { + humioCluster.Spec.TargetReplicationFactor = targetReplicationFactor + } + if humioCluster.Spec.StoragePartitionsCount == 0 { + humioCluster.Spec.StoragePartitionsCount = storagePartitionsCount + } + if humioCluster.Spec.DigestPartitionsCount == 0 { + humioCluster.Spec.DigestPartitionsCount = digestPartitionsCount + } + if humioCluster.Spec.NodeCount == 0 { + humioCluster.Spec.NodeCount = nodeCount + } +} diff --git a/pkg/controller/humiocluster/humiocluster_controller.go b/pkg/controller/humiocluster/humiocluster_controller.go index 37d62c284..a9953beea 100644 --- a/pkg/controller/humiocluster/humiocluster_controller.go +++ b/pkg/controller/humiocluster/humiocluster_controller.go @@ -2,24 +2,40 @@ package humiocluster import ( "context" + "fmt" + "strconv" corev1alpha1 "github.com/humio/humio-operator/pkg/apis/core/v1alpha1" + "github.com/prometheus/client_golang/prometheus" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + k8serrors 
"k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) -var log = logf.Log.WithName("controller_humiocluster") +var ( + log = logf.Log.WithName("controller_humiocluster") + metricPodsCreated = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "humio_controller_pods_created_total", + Help: "Total number of pod objects created by controller", + }) + metricPodsDeleted = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "humio_controller_pods_deleted_total", + Help: "Total number of pod objects deleted by controller", + }) +) /** * USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller @@ -86,9 +102,9 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile. reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) reqLogger.Info("Reconciling HumioCluster") - // Fetch the HumioCluster instance - instance := &corev1alpha1.HumioCluster{} - err := r.client.Get(context.TODO(), request.NamespacedName, instance) + // Fetch the HumioCluster + humioCluster := &corev1alpha1.HumioCluster{} + err := r.client.Get(context.TODO(), request.NamespacedName, humioCluster) if err != nil { if errors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. @@ -100,33 +116,189 @@ func (r *ReconcileHumioCluster) Reconcile(request reconcile.Request) (reconcile. return reconcile.Result{}, err } - // Define a new Pod object - pod := newPodForCR(instance) + // Set defaults + setDefaults(humioCluster) - // Set HumioCluster instance as the owner and controller - if err := controllerutil.SetControllerReference(instance, pod, r.scheme); err != nil { + // Ensure pods exist + err = r.ensurePodsExist(context.TODO(), humioCluster) + if err != nil { return reconcile.Result{}, err } - // Check if this Pod already exists - found := &corev1.Pod{} - err = r.client.Get(context.TODO(), types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, found) - if err != nil && errors.IsNotFound(err) { - reqLogger.Info("Creating a new Pod", "Pod.Namespace", pod.Namespace, "Pod.Name", pod.Name) - err = r.client.Create(context.TODO(), pod) - if err != nil { - return reconcile.Result{}, err + // All done, don't requeue + return reconcile.Result{}, nil +} + +func (r *ReconcileHumioCluster) ensurePodsExist(conetext context.Context, humioCluster *corev1alpha1.HumioCluster) error { + // Ensure we have pods for the defined NodeCount. + // If scaling down, we will handle the extra/obsolete pods later. 
+ for nodeID := 0; nodeID < humioCluster.Spec.NodeCount; nodeID++ { + var existingPod corev1.Pod + pod := constructPod(humioCluster, nodeID) + + if err := controllerutil.SetControllerReference(humioCluster, pod, r.scheme); err != nil { + return err } - // Pod created successfully - don't requeue - return reconcile.Result{}, nil - } else if err != nil { - return reconcile.Result{}, err + if err := r.client.Get(context.TODO(), types.NamespacedName{ + Namespace: humioCluster.Namespace, + Name: fmt.Sprintf("%s-core-%d", humioCluster.Name, nodeID), + }, &existingPod); err != nil { + if k8serrors.IsNotFound(err) { + + err := r.client.Create(context.TODO(), pod) + if err != nil { + log.Info(fmt.Sprintf("unable to create pod: %v", err)) + return fmt.Errorf("unable to create Pod for HumioCluster: %v", err) + } + log.Info(fmt.Sprintf("successfully created pod %s for HumioCluster %s with node id: %d", pod.Name, humioCluster.Name, nodeID)) + metricPodsCreated.Inc() + } + } } + return nil +} - // Pod already exists - don't requeue - reqLogger.Info("Skip reconcile: Pod already exists", "Pod.Namespace", found.Namespace, "Pod.Name", found.Name) - return reconcile.Result{}, nil +func constructPod(hc *corev1alpha1.HumioCluster, nodeID int) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-core-%d", hc.Name, nodeID), + Namespace: hc.Namespace, + Labels: labelsForHumio(hc.Name, nodeID), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: hc.APIVersion, + Kind: hc.Kind, + Name: hc.Name, + UID: hc.UID, + }, + }, + }, + Spec: corev1.PodSpec{ + Hostname: fmt.Sprintf("%s-core-%d", hc.Name, nodeID), + Subdomain: hc.Name, + Containers: []corev1.Container{ + { + Name: "humio", + Image: fmt.Sprintf("%s:%s", hc.Spec.Image, hc.Spec.Version), + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 8080, + Protocol: "TCP", + }, + { + Name: "es", + ContainerPort: 9200, + Protocol: "TCP", + }, + }, + Env: *constructEnvVarList(nodeID), + ImagePullPolicy: "IfNotPresent", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "humio-data", + MountPath: "/data", + }, + }, + ReadinessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/api/v1/status", + Port: intstr.IntOrString{IntVal: 8080}, + }, + }, + InitialDelaySeconds: 90, + PeriodSeconds: 5, + TimeoutSeconds: 2, + SuccessThreshold: 1, + FailureThreshold: 12, + }, + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/api/v1/status", + Port: intstr.IntOrString{IntVal: 8080}, + }, + }, + InitialDelaySeconds: 90, + PeriodSeconds: 5, + TimeoutSeconds: 2, + SuccessThreshold: 1, + FailureThreshold: 12, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "humio-data", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: fmt.Sprintf("%s-core-%d", hc.Name, nodeID), + }, + }, + }, + }, + }, + } +} + +func constructEnvVarList(nodeID int) *[]corev1.EnvVar { + return &[]corev1.EnvVar{ + { + Name: "THIS_POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + }, + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + {Name: "BOOTSTRAP_HOST_ID", Value: 
strconv.Itoa(nodeID)}, + {Name: "HUMIO_JVM_ARGS", Value: "-Xss2m -Xms256m -Xmx1536m -server -XX:+UseParallelOldGC -XX:+ScavengeBeforeFullGC -XX:+DisableExplicitGC"}, + {Name: "HUMIO_PORT", Value: "8080"}, + {Name: "ELASTIC_PORT", Value: "9200"}, + {Name: "KAFKA_MANAGED_BY_HUMIO", Value: "true"}, + {Name: "AUTHENTICATION_METHOD", Value: "single-user"}, + {Name: "SINGLE_USER_PASSWORD", Value: "temp"}, + {Name: "KAFKA_SERVERS", Value: "humio-cp-kafka-0.humio-cp-kafka-headless:9092"}, + {Name: "ZOOKEEPER_URL", Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless:2181"}, + { + Name: "EXTERNAL_URL", // URL used by other Humio hosts. + Value: fmt.Sprintf("http://$(POD_NAME).core.$(POD_NAMESPACE).svc.cluster.local:$(HUMIO_PORT)"), + //Value: "http://$(POD_NAME).humio-humio-core-headless.$(POD_NAMESPACE).svc.cluster.local:8080", + //Value: "http://$(THIS_POD_IP):$(HUMIO_PORT)", + }, + { + Name: "PUBLIC_URL", // URL used by users/browsers. + //Value: "http://$(POD_NAME).humio-humio-core-headless.$(POD_NAMESPACE).svc.cluster.local:8080", + Value: "http://$(THIS_POD_IP):$(HUMIO_PORT)", + }, + } +} + +func labelsForHumio(clusterName string, nodeID int) map[string]string { + labels := map[string]string{ + "app": "humio", + "humio_cr": clusterName, + "humio_node_id": strconv.Itoa(nodeID), + } + return labels } // newPodForCR returns a busybox pod with the same name/namespace as the cr @@ -151,3 +323,8 @@ func newPodForCR(cr *corev1alpha1.HumioCluster) *corev1.Pod { }, } } + +func init() { + metrics.Registry.MustRegister(metricPodsCreated) + metrics.Registry.MustRegister(metricPodsDeleted) +} diff --git a/pkg/controller/humiocluster/humiocluster_controller_test.go b/pkg/controller/humiocluster/humiocluster_controller_test.go index d834bc395..3938b4c18 100644 --- a/pkg/controller/humiocluster/humiocluster_controller_test.go +++ b/pkg/controller/humiocluster/humiocluster_controller_test.go @@ -9,78 +9,90 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -var ( - humioCluster1 = &humioClusterv1alpha1.HumioCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "humiocluster", - Namespace: "logging", - }, - Spec: humioClusterv1alpha1.HumioClusterSpec{ - Image: "humio/humio-core", - Version: "1.9.0", - TargetReplicationFactor: 1, - }, - } + "sigs.k8s.io/controller-runtime/pkg/reconcile" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" ) func TestReconcileHumioCluster_Reconcile(t *testing.T) { // Set the logger to development mode for verbose logs. logf.SetLogger(logf.ZapLogger(true)) - // Objects to track in the fake client. - objs := []runtime.Object{ - humioCluster1, + tests := []struct { + name string + humioCluster *humioClusterv1alpha1.HumioCluster + }{ + { + "test simple cluster reconciliation", + &humioClusterv1alpha1.HumioCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "humiocluster", + Namespace: "logging", + }, + Spec: humioClusterv1alpha1.HumioClusterSpec{ + Image: "humio/humio-core", + Version: "1.9.0", + TargetReplicationFactor: 3, + NodeCount: 3, + }, + }, + }, } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { - // Register operator types with the runtime scheme. 
- s := scheme.Scheme - s.AddKnownTypes(humioClusterv1alpha1.SchemeGroupVersion, humioCluster1) + // Objects to track in the fake client. + objs := []runtime.Object{ + tt.humioCluster, + } - // Create a fake client to mock API calls. - cl := fake.NewFakeClient(objs...) - // Create a ReconcileHumioCluster object with the scheme and fake client. - r := &ReconcileHumioCluster{client: cl, scheme: s} + // Register operator types with the runtime scheme. + s := scheme.Scheme + s.AddKnownTypes(humioClusterv1alpha1.SchemeGroupVersion, tt.humioCluster) - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . - req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: humioCluster1.ObjectMeta.Name, - Namespace: humioCluster1.ObjectMeta.Namespace, - }, - } - res, err := r.Reconcile(req) - if err != nil { - t.Fatalf("reconcile: (%v)", err) - } + // Create a fake client to mock API calls. + cl := fake.NewFakeClient(objs...) + // Create a ReconcileHumioCluster object with the scheme and fake client. + r := &ReconcileHumioCluster{client: cl, scheme: s} - pod := &corev1.Pod{} - err = cl.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("%s-pod", humioCluster1.ObjectMeta.Name), Namespace: humioCluster1.ObjectMeta.Namespace}, pod) - if err != nil { - t.Fatalf("get pod: (%v). %+v", err, pod) - } + // Mock request to simulate Reconcile() being called on an event for a + // watched resource . + req := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: tt.humioCluster.ObjectMeta.Name, + Namespace: tt.humioCluster.ObjectMeta.Namespace, + }, + } + res, err := r.Reconcile(req) + if err != nil { + t.Errorf("reconcile: (%v)", err) + } - // Reconcile again so Reconcile() checks pods and updates the HumioCluster resources' Status. - res, err = r.Reconcile(req) - if err != nil { - t.Fatalf("reconcile: (%v)", err) - } - if res != (reconcile.Result{}) { - t.Error("reconcile did not return an empty Result") - } + for nodeID := 0; nodeID < tt.humioCluster.Spec.NodeCount; nodeID++ { + pod := &corev1.Pod{} + err = cl.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("%s-core-%d", tt.humioCluster.ObjectMeta.Name, nodeID), Namespace: tt.humioCluster.ObjectMeta.Namespace}, pod) + if err != nil { + t.Errorf("get pod: (%v). %+v", err, pod) + } + } + + // Reconcile again so Reconcile() checks pods and updates the HumioCluster resources' Status. + res, err = r.Reconcile(req) + if err != nil { + t.Errorf("reconcile: (%v)", err) + } + if res != (reconcile.Result{}) { + t.Error("reconcile did not return an empty Result") + } - // Get the updated HumioCluster object. - humioCluster := &humioClusterv1alpha1.HumioCluster{} - err = r.client.Get(context.TODO(), req.NamespacedName, humioCluster) - if err != nil { - t.Errorf("get HumioCluster: (%v)", err) + // Get the updated HumioCluster object. 
+ humioCluster := &humioClusterv1alpha1.HumioCluster{} + err = r.client.Get(context.TODO(), req.NamespacedName, humioCluster) + if err != nil { + t.Errorf("get HumioCluster: (%v)", err) + } + }) } } diff --git a/pkg/humio/client.go b/pkg/humio/client.go new file mode 100644 index 000000000..0506bf75f --- /dev/null +++ b/pkg/humio/client.go @@ -0,0 +1,93 @@ +package humio + +import ( + "fmt" + + humioapi "github.com/humio/cli/api" + "github.com/prometheus/common/log" +) + +// Client is the interface that can be mocked +type Client interface { + GetClusters() (humioapi.Cluster, error) + UpdateStoragePartitionScheme([]humioapi.StoragePartitionInput) error + UpdateIngestPartitionScheme([]humioapi.IngestPartitionInput) error + StartDataRedistribution() error + ClusterMoveStorageRouteAwayFromNode(int) error + ClusterMoveIngestRoutesAwayFromNode(int) error + Unregister(int) error + GetStoragePartitions() (*[]humioapi.StoragePartition, error) + GetIngestPartitions() (*[]humioapi.IngestPartition, error) +} + +// ClientConfig stores our Humio api client +type ClientConfig struct { + apiClient *humioapi.Client +} + +// NewClient returns a ClientConfig +func NewClient(config *humioapi.Config) (ClientConfig, error) { + //humioapi.NewClient(humioapi.Config{Address: hc.Status.BaseURL, Token: hc.Status.JWTToken}) + client, err := humioapi.NewClient(*config) + if err != nil { + log.Info(fmt.Sprintf("could not create humio client: %v", err)) + } + return ClientConfig{apiClient: client}, err +} + +// GetClusters returns a humio cluster and can be mocked via the Client interface +func (h *ClientConfig) GetClusters() (humioapi.Cluster, error) { + clusters, err := h.apiClient.Clusters().Get() + if err != nil { + log.Error(fmt.Sprintf("could not get cluster information: %v", err)) + } + return clusters, err +} + +// UpdateStoragePartitionScheme updates the storage partition scheme and can be mocked via the Client interface +func (h *ClientConfig) UpdateStoragePartitionScheme(spi []humioapi.StoragePartitionInput) error { + err := h.apiClient.Clusters().UpdateStoragePartitionScheme(spi) + if err != nil { + log.Error(fmt.Sprintf("could not update storage partition scheme cluster information: %v", err)) + } + return err +} + +// UpdateIngestPartitionScheme updates the ingest partition scheme and can be mocked via the Client interface +func (h *ClientConfig) UpdateIngestPartitionScheme(ipi []humioapi.IngestPartitionInput) error { + err := h.apiClient.Clusters().UpdateIngestPartitionScheme(ipi) + if err != nil { + log.Error(fmt.Sprintf("could not update ingest partition scheme cluster information: %v", err)) + } + return err +} + +// StartDataRedistribution notifies the Humio cluster that it should start redistributing data to match current assignments +func (h *ClientConfig) StartDataRedistribution() error { + return h.apiClient.Clusters().StartDataRedistribution() +} + +// ClusterMoveStorageRouteAwayFromNode notifies the Humio cluster that a node ID should be removed from handling any storage partitions +func (h *ClientConfig) ClusterMoveStorageRouteAwayFromNode(id int) error { + return h.apiClient.Clusters().ClusterMoveStorageRouteAwayFromNode(id) +} + +// ClusterMoveIngestRoutesAwayFromNode notifies the Humio cluster that a node ID should be removed from handling any ingest partitions +func (h *ClientConfig) ClusterMoveIngestRoutesAwayFromNode(id int) error { + return h.apiClient.Clusters().ClusterMoveIngestRoutesAwayFromNode(id) +} + +// Unregister tells the Humio cluster that we want to unregister a node +func 
(h *ClientConfig) Unregister(id int) error { + return h.apiClient.ClusterNodes().Unregister(int64(id), false) +} + +// GetStoragePartitions is not implemented +func (h *ClientConfig) GetStoragePartitions() (*[]humioapi.StoragePartition, error) { + return &[]humioapi.StoragePartition{}, fmt.Errorf("not implemented") +} + +// GetIngestPartitions is not immplemented +func (h *ClientConfig) GetIngestPartitions() (*[]humioapi.IngestPartition, error) { + return &[]humioapi.IngestPartition{}, fmt.Errorf("not implemented") +} diff --git a/pkg/humio/client_test.go b/pkg/humio/client_test.go new file mode 100644 index 000000000..566170dc2 --- /dev/null +++ b/pkg/humio/client_test.go @@ -0,0 +1,101 @@ +package humio + +import ( + humioapi "github.com/humio/cli/api" +) + +type ClientMock struct { + Cluster humioapi.Cluster + ClusterError error + StoragePartitions *[]humioapi.StoragePartition + IngestPartitions *[]humioapi.IngestPartition + UpdateStoragePartitionSchemeError error + UpdateIngestPartitionSchemeError error +} + +type MockClientConfig struct { + apiClient *ClientMock +} + +func NewMocklient(cluster humioapi.Cluster, clusterError error, updateStoragePartitionSchemeError error, updateIngestPartitionSchemeError error) *MockClientConfig { + storagePartition := humioapi.StoragePartition{} + ingestPartition := humioapi.IngestPartition{} + + return &MockClientConfig{ + apiClient: &ClientMock{ + Cluster: cluster, + ClusterError: clusterError, + StoragePartitions: &[]humioapi.StoragePartition{storagePartition}, + IngestPartitions: &[]humioapi.IngestPartition{ingestPartition}, + UpdateStoragePartitionSchemeError: updateStoragePartitionSchemeError, + UpdateIngestPartitionSchemeError: updateIngestPartitionSchemeError, + }, + } +} + +func (h *MockClientConfig) GetClusters() (humioapi.Cluster, error) { + if h.apiClient.ClusterError != nil { + return humioapi.Cluster{}, h.apiClient.ClusterError + } + return h.apiClient.Cluster, nil +} + +func (h *MockClientConfig) UpdateStoragePartitionScheme(sps []humioapi.StoragePartitionInput) error { + if h.apiClient.UpdateStoragePartitionSchemeError != nil { + return h.apiClient.UpdateStoragePartitionSchemeError + } + + var storagePartitions []humioapi.StoragePartition + for _, storagePartitionInput := range sps { + var nodeIdsList []int + for _, nodeID := range storagePartitionInput.NodeIDs { + nodeIdsList = append(nodeIdsList, int(nodeID)) + } + storagePartitions = append(storagePartitions, humioapi.StoragePartition{Id: int(storagePartitionInput.ID), NodeIds: nodeIdsList}) + } + h.apiClient.StoragePartitions = &storagePartitions + + return nil +} + +func (h *MockClientConfig) UpdateIngestPartitionScheme(ips []humioapi.IngestPartitionInput) error { + if h.apiClient.UpdateIngestPartitionSchemeError != nil { + return h.apiClient.UpdateIngestPartitionSchemeError + } + + var ingestPartitions []humioapi.IngestPartition + for _, ingestPartitionInput := range ips { + var nodeIdsList []int + for _, nodeID := range ingestPartitionInput.NodeIDs { + nodeIdsList = append(nodeIdsList, int(nodeID)) + } + ingestPartitions = append(ingestPartitions, humioapi.IngestPartition{Id: int(ingestPartitionInput.ID), NodeIds: nodeIdsList}) + } + h.apiClient.IngestPartitions = &ingestPartitions + + return nil +} + +func (h *MockClientConfig) ClusterMoveStorageRouteAwayFromNode(int) error { + return nil +} + +func (h *MockClientConfig) ClusterMoveIngestRoutesAwayFromNode(int) error { + return nil +} + +func (h *MockClientConfig) Unregister(int) error { + return nil +} + +func (h 
*MockClientConfig) StartDataRedistribution() error { + return nil +} + +func (h *MockClientConfig) GetStoragePartitions() (*[]humioapi.StoragePartition, error) { + return h.apiClient.StoragePartitions, nil +} + +func (h *MockClientConfig) GetIngestPartitions() (*[]humioapi.IngestPartition, error) { + return h.apiClient.IngestPartitions, nil +} diff --git a/pkg/humio/cluster.go b/pkg/humio/cluster.go new file mode 100644 index 000000000..03aa3cd40 --- /dev/null +++ b/pkg/humio/cluster.go @@ -0,0 +1,345 @@ +package humio + +import ( + "fmt" + + humioapi "github.com/humio/cli/api" + corev1alpha1 "github.com/humio/humio-operator/pkg/apis/core/v1alpha1" + "github.com/prometheus/common/log" + "github.com/shurcooL/graphql" +) + +// ClusterController holds our client +type ClusterController struct { + client Client +} + +// NewClusterController returns a ClusterController +func NewClusterController(client Client) *ClusterController { + return &ClusterController{client: client} +} + +// AreAllRegisteredNodesAvailable only returns true if all nodes registered with Humio are available +func (c *ClusterController) AreAllRegisteredNodesAvailable() (bool, error) { + cluster, err := c.client.GetClusters() + if err != nil { + return false, err + } + + for _, n := range cluster.Nodes { + if !n.IsAvailable { + return false, nil + } + } + return true, nil +} + +// NoDataMissing only returns true if all data are available +func (c *ClusterController) NoDataMissing() (bool, error) { + cluster, err := c.client.GetClusters() + if err != nil { + return false, err + } + if cluster.MissingSegmentSize == 0 { + return true, nil + } + return false, nil +} + +// IsNodeRegistered returns whether the Humio cluster has a node with the given node id +func (c *ClusterController) IsNodeRegistered(nodeID int) (bool, error) { + cluster, err := c.client.GetClusters() + if err != nil { + return false, err + } + + for _, node := range cluster.Nodes { + if int(node.Id) == nodeID { + return true, nil + } + } + return false, nil +} + +// CountNodesRegistered returns how many registered nodes there are in the cluster +func (c *ClusterController) CountNodesRegistered() (int, error) { + cluster, err := c.client.GetClusters() + if err != nil { + return -1, err + } + return len(cluster.Nodes), nil +} + +// CanBeSafelyUnregistered returns true if the Humio API indicates that the node can be safely unregistered. This should ensure that the node does not hold any data. +func (c *ClusterController) CanBeSafelyUnregistered(podID int) (bool, error) { + cluster, err := c.client.GetClusters() + if err != nil { + return false, err + } + + for _, node := range cluster.Nodes { + if int(node.Id) == podID && node.CanBeSafelyUnregistered { + return true, nil + } + } + return false, nil +} + +// AreStoragePartitionsBalanced ensures three things. +// First, all storage partitions must be assigned to the expected (target replication factor) number of storage nodes. +// Second, all storage nodes must have storage partitions assigned. +// Third, the difference in the number of partitions assigned per storage node must be at most 1.
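+// For example, with the defaults (storagePartitionsCount 24, targetReplicationFactor 2) and 3 registered nodes,
+// a balanced scheme gives every node 24*2/3 = 16 partition replicas, so the min/max spread checked below is 0.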
+func (c *ClusterController) AreStoragePartitionsBalanced(hc *corev1alpha1.HumioCluster) (bool, error) { + cluster, err := c.client.GetClusters() + if err != nil { + return false, err + } + + nodeToPartitionCount := make(map[int]int) + for _, nodeID := range cluster.Nodes { + nodeToPartitionCount[nodeID.Id] = 0 + } + + for _, partition := range cluster.StoragePartitions { + if len(partition.NodeIds) != hc.Spec.TargetReplicationFactor { + log.Info("the number of nodes in a partition does not match the replication factor") + return false, nil + } + for _, node := range partition.NodeIds { + nodeToPartitionCount[node]++ + } + } + + var min, max int + for i := 0; i < len(cluster.Nodes); i++ { + if nodeToPartitionCount[i] == 0 { + log.Infof("node id %d does not contain any partitions", i) + return false, nil + } + if min == 0 { + min = nodeToPartitionCount[i] + } + if max == 0 { + max = nodeToPartitionCount[i] + } + if nodeToPartitionCount[i] > max { + max = nodeToPartitionCount[i] + } + if nodeToPartitionCount[i] < min { + min = nodeToPartitionCount[i] + } + } + + if max-min > 1 { + log.Infof("the difference in number of partitions assigned per storage node is greater than 1, min=%d, max=%d", min, max) + return false, nil + } + + log.Infof("storage partitions are balanced min=%d, max=%d", min, max) + return true, nil + +} + +// RebalanceStoragePartitions will assign storage partitions evenly across registered storage nodes. If replication is not set, we set it to 1. +func (c *ClusterController) RebalanceStoragePartitions(hc *corev1alpha1.HumioCluster) error { + log.Info("rebalancing storage partitions") + + cluster, err := c.client.GetClusters() + if err != nil { + return err + } + + replication := hc.Spec.TargetReplicationFactor + if hc.Spec.TargetReplicationFactor == 0 { + replication = 1 + } + + var storageNodeIDs []int + + for _, node := range cluster.Nodes { + storageNodeIDs = append(storageNodeIDs, node.Id) + } + + partitionAssignment, err := generateStoragePartitionSchemeCandidate(storageNodeIDs, hc.Spec.StoragePartitionsCount, replication) + if err != nil { + return fmt.Errorf("could not generate storage partition scheme candidate: %v", err) + } + + if err := c.client.UpdateStoragePartitionScheme(partitionAssignment); err != nil { + return fmt.Errorf("could not update storage partition scheme: %v", err) + } + return nil +} + +// AreIngestPartitionsBalanced ensures three things. +// First, all ingest partitions must be assigned to the expected (target replication factor) number of digest nodes. +// Second, all digest nodes must have ingest partitions assigned. +// Third, the difference in the number of partitions assigned per digest node must be at most 1.
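+// As with storage partitions, a spread of 1 is tolerated: for example, 24 ingest partitions with replication
+// factor 2 across 5 digest nodes yield 48 assignments, so a balanced scheme leaves some nodes with 10 and others with 9.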
+func (c *ClusterController) AreIngestPartitionsBalanced(hc *corev1alpha1.HumioCluster) (bool, error) { + cluster, err := c.client.GetClusters() + if err != nil { + return false, err + } + + // get a map that can tell us how many partitions a node has + nodeToPartitionCount := make(map[int]int) + for _, nodeID := range cluster.Nodes { + nodeToPartitionCount[nodeID.Id] = 0 + } + + for _, partition := range cluster.IngestPartitions { + if len(partition.NodeIds) != hc.Spec.TargetReplicationFactor { + // our target replication factor is not met + return false, nil + } + for _, node := range partition.NodeIds { + nodeToPartitionCount[node]++ + } + } + + var min, max int + for i := 0; i < len(cluster.Nodes); i++ { + if nodeToPartitionCount[i] == 0 { + log.Infof("node id %d does not contain any partitions", i) + return false, nil + } + if min == 0 { + min = nodeToPartitionCount[i] + } + if max == 0 { + max = nodeToPartitionCount[i] + } + if nodeToPartitionCount[i] > max { + max = nodeToPartitionCount[i] + } + if nodeToPartitionCount[i] < min { + min = nodeToPartitionCount[i] + } + } + + if max-min > 1 { + log.Infof("the difference in number of partitions assigned per digest node is greater than 1, min=%d, max=%d", min, max) + return false, nil + } + + log.Infof("ingest partitions are balanced min=%d, max=%d", min, max) + return true, nil +} + +// RebalanceIngestPartitions will assign ingest partitions evenly across registered digest nodes. If replication is not set, we set it to 1. +func (c *ClusterController) RebalanceIngestPartitions(hc *corev1alpha1.HumioCluster) error { + log.Info("rebalancing ingest partitions") + + cluster, err := c.client.GetClusters() + if err != nil { + return err + } + + replication := hc.Spec.TargetReplicationFactor + if hc.Spec.TargetReplicationFactor == 0 { + replication = 1 + } + + var digestNodeIDs []int + + for _, node := range cluster.Nodes { + digestNodeIDs = append(digestNodeIDs, node.Id) + } + + partitionAssignment, err := generateIngestPartitionSchemeCandidate(hc, digestNodeIDs, hc.Spec.DigestPartitionsCount, replication) + if err != nil { + return fmt.Errorf("could not generate ingest partition scheme candidate: %v", err) + } + + if err := c.client.UpdateIngestPartitionScheme(partitionAssignment); err != nil { + return fmt.Errorf("could not update ingest partition scheme: %v", err) + } + return nil +} + +// StartDataRedistribution notifies the Humio cluster that it should start redistributing data to match current assignments +func (c *ClusterController) StartDataRedistribution(hc *corev1alpha1.HumioCluster) error { + log.Info("starting data redistribution") + + if err := c.client.StartDataRedistribution(); err != nil { + return fmt.Errorf("could not start data redistribution: %v", err) + } + return nil +} + +// MoveStorageRouteAwayFromNode notifies the Humio cluster that a node ID should be removed from handling any storage partitions +func (c *ClusterController) MoveStorageRouteAwayFromNode(hc *corev1alpha1.HumioCluster, pID int) error { + log.Info(fmt.Sprintf("moving storage route away from node %d", pID)) + + if err := c.client.ClusterMoveStorageRouteAwayFromNode(pID); err != nil { + return fmt.Errorf("could not move storage route away from node: %v", err) + } + return nil +} + +// MoveIngestRoutesAwayFromNode notifies the Humio cluster that a node ID should be removed from handling any ingest partitions +func (c *ClusterController) MoveIngestRoutesAwayFromNode(hc *corev1alpha1.HumioCluster, pID int) error { + log.Info(fmt.Sprintf("moving ingest routes 
away from node %d", pID)) + + if err := c.client.ClusterMoveIngestRoutesAwayFromNode(pID); err != nil { + return fmt.Errorf("could not move ingest routes away from node: %v", err) + } + return nil +} + +// ClusterUnregisterNode tells the Humio cluster that we want to unregister a node +func (c *ClusterController) ClusterUnregisterNode(hc *corev1alpha1.HumioCluster, pID int) error { + log.Info(fmt.Sprintf("unregistering node with id %d", pID)) + + err := c.client.Unregister(pID) + if err != nil { + return fmt.Errorf("could not unregister node: %v", err) + } + return nil +} + +func generateStoragePartitionSchemeCandidate(storageNodeIDs []int, partitionCount, targetReplication int) ([]humioapi.StoragePartitionInput, error) { + replicas := targetReplication + if targetReplication > len(storageNodeIDs) { + replicas = len(storageNodeIDs) + } + if replicas == 0 { + return nil, fmt.Errorf("not possible to use replication factor 0") + } + + var ps []humioapi.StoragePartitionInput + + for p := 0; p < partitionCount; p++ { + var nodeIds []graphql.Int + for r := 0; r < replicas; r++ { + idx := (p + r) % len(storageNodeIDs) + nodeIds = append(nodeIds, graphql.Int(storageNodeIDs[idx])) + } + ps = append(ps, humioapi.StoragePartitionInput{ID: graphql.Int(p), NodeIDs: nodeIds}) + } + + return ps, nil +} + +func generateIngestPartitionSchemeCandidate(hc *corev1alpha1.HumioCluster, ingestNodeIDs []int, partitionCount, targetReplication int) ([]humioapi.IngestPartitionInput, error) { + replicas := targetReplication + if targetReplication > len(ingestNodeIDs) { + replicas = len(ingestNodeIDs) + } + if replicas == 0 { + return nil, fmt.Errorf("not possible to use replication factor 0") + } + + var ps []humioapi.IngestPartitionInput + + for p := 0; p < partitionCount; p++ { + var nodeIds []graphql.Int + for r := 0; r < replicas; r++ { + idx := (p + r) % len(ingestNodeIDs) + nodeIds = append(nodeIds, graphql.Int(ingestNodeIDs[idx])) + } + ps = append(ps, humioapi.IngestPartitionInput{ID: graphql.Int(p), NodeIDs: nodeIds}) + } + + return ps, nil +} diff --git a/pkg/humio/cluster_test.go b/pkg/humio/cluster_test.go new file mode 100644 index 000000000..026db751b --- /dev/null +++ b/pkg/humio/cluster_test.go @@ -0,0 +1,826 @@ +package humio + +import ( + "reflect" + "testing" + + humioapi "github.com/humio/cli/api" + corev1alpha1 "github.com/humio/humio-operator/pkg/apis/core/v1alpha1" +) + +func TestClusterController_AreAllRegisteredNodesAvailable(t *testing.T) { + type fields struct { + client Client + } + tests := []struct { + name string + fields fields + want bool + wantErr bool + }{ + { + "test available nodes", + fields{NewMocklient( + humioapi.Cluster{ + Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{ + IsAvailable: true, + }}}, nil, nil, nil), + }, + true, + false, + }, + { + "test no available nodes", + fields{NewMocklient( + humioapi.Cluster{ + Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{ + IsAvailable: false, + }}}, nil, nil, nil), + }, + false, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &ClusterController{ + client: tt.fields.client, + } + got, err := c.AreAllRegisteredNodesAvailable() + if (err != nil) != tt.wantErr { + t.Errorf("ClusterController.AreAllRegisteredNodesAvailable() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("ClusterController.AreAllRegisteredNodesAvailable() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClusterController_NoDataMissing(t *testing.T) { + type fields struct 
{ + client Client + } + tests := []struct { + name string + fields fields + want bool + wantErr bool + }{ + { + "test no missing segments", + fields{NewMocklient( + humioapi.Cluster{ + MissingSegmentSize: 0, + }, nil, nil, nil), + }, + true, + false, + }, + { + "test missing segments", + fields{NewMocklient( + humioapi.Cluster{ + MissingSegmentSize: 1, + }, nil, nil, nil), + }, + false, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &ClusterController{ + client: tt.fields.client, + } + got, err := c.NoDataMissing() + if (err != nil) != tt.wantErr { + t.Errorf("ClusterController.NoDataMissing() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("ClusterController.NoDataMissing() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClusterController_IsNodeRegistered(t *testing.T) { + type fields struct { + client Client + } + type args struct { + nodeID int + } + tests := []struct { + name string + fields fields + args args + want bool + wantErr bool + }{ + { + "test node is registered", + fields{NewMocklient( + humioapi.Cluster{ + Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{ + Id: 1, + }}}, nil, nil, nil), + }, + args{ + nodeID: 1, + }, + true, + false, + }, + { + "test node is not registered", + fields{NewMocklient( + humioapi.Cluster{ + Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{ + Id: 2, + }}}, nil, nil, nil), + }, + args{ + nodeID: 1, + }, + false, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &ClusterController{ + client: tt.fields.client, + } + got, err := c.IsNodeRegistered(tt.args.nodeID) + if (err != nil) != tt.wantErr { + t.Errorf("ClusterController.IsNodeRegistered() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("ClusterController.IsNodeRegistered() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClusterController_CountNodesRegistered(t *testing.T) { + type fields struct { + client Client + } + tests := []struct { + name string + fields fields + want int + wantErr bool + }{ + { + "test count registered nodes", + fields{NewMocklient( + humioapi.Cluster{ + Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{}}}, nil, nil, nil), + }, + 1, + false, + }, + { + "test count no registered nodes", + fields{NewMocklient( + humioapi.Cluster{}, nil, nil, nil), + }, + 0, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &ClusterController{ + client: tt.fields.client, + } + got, err := c.CountNodesRegistered() + if (err != nil) != tt.wantErr { + t.Errorf("ClusterController.CountNodesRegistered() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("ClusterController.CountNodesRegistered() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClusterController_CanBeSafelyUnregistered(t *testing.T) { + type fields struct { + client Client + } + type args struct { + podID int + } + tests := []struct { + name string + fields fields + args args + want bool + wantErr bool + }{ + { + "test node is can be safely unregistered", + fields{NewMocklient( + humioapi.Cluster{ + Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{ + Id: 1, + CanBeSafelyUnregistered: true, + }}}, nil, nil, nil), + }, + args{ + podID: 1, + }, + true, + false, + }, + { + "test node is cannot be safely unregistered", + fields{NewMocklient( + humioapi.Cluster{ + Nodes: []humioapi.ClusterNode{humioapi.ClusterNode{ + Id: 1, + CanBeSafelyUnregistered: false, + 
}}}, nil, nil, nil), + }, + args{ + podID: 1, + }, + false, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &ClusterController{ + client: tt.fields.client, + } + got, err := c.CanBeSafelyUnregistered(tt.args.podID) + if (err != nil) != tt.wantErr { + t.Errorf("ClusterController.CanBeSafelyUnregistered() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("ClusterController.CanBeSafelyUnregistered() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClusterController_IsStoragePartitionsBalanced(t *testing.T) { + type fields struct { + client Client + } + type args struct { + hc *corev1alpha1.HumioCluster + } + tests := []struct { + name string + fields fields + args args + want bool + wantErr bool + }{ + { + "test storage partitions are balanced", + fields{NewMocklient( + humioapi.Cluster{ + StoragePartitions: []humioapi.StoragePartition{ + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{0}, + }, + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{1}, + }, + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{2}, + }, + }, + Nodes: []humioapi.ClusterNode{ + humioapi.ClusterNode{ + Id: 0, + }, + humioapi.ClusterNode{ + Id: 1, + }, + humioapi.ClusterNode{ + Id: 2, + }, + }}, nil, nil, nil), + }, + args{ + &corev1alpha1.HumioCluster{ + Spec: corev1alpha1.HumioClusterSpec{ + TargetReplicationFactor: 1, + }, + }, + }, + true, + false, + }, + { + "test storage partitions do no equal the target replication factor", + fields{NewMocklient( + humioapi.Cluster{ + StoragePartitions: []humioapi.StoragePartition{ + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{0, 1}, + }, + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{1, 2}, + }, + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{2, 0}, + }, + }, + Nodes: []humioapi.ClusterNode{ + humioapi.ClusterNode{ + Id: 0, + }, + humioapi.ClusterNode{ + Id: 1, + }, + humioapi.ClusterNode{ + Id: 2, + }, + }}, nil, nil, nil), + }, + args{ + &corev1alpha1.HumioCluster{ + Spec: corev1alpha1.HumioClusterSpec{ + TargetReplicationFactor: 1, + }, + }, + }, + false, + false, + }, + { + "test storage partitions are unbalanced by more than a factor of 1", + fields{NewMocklient( + humioapi.Cluster{ + StoragePartitions: []humioapi.StoragePartition{ + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{0, 0, 0}, + }, + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{1, 1, 1}, + }, + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{2, 1, 1}, + }, + }, + Nodes: []humioapi.ClusterNode{ + humioapi.ClusterNode{ + Id: 0, + }, + humioapi.ClusterNode{ + Id: 1, + }, + humioapi.ClusterNode{ + Id: 2, + }, + }}, nil, nil, nil), + }, + args{ + &corev1alpha1.HumioCluster{ + Spec: corev1alpha1.HumioClusterSpec{ + TargetReplicationFactor: 3, + }, + }, + }, + false, + false, + }, + { + "test storage partitions are not balanced", + fields{NewMocklient( + humioapi.Cluster{ + StoragePartitions: []humioapi.StoragePartition{ + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{0, 1}, + }, + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{1, 0}, + }, + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{0, 1}, + }, + }, + Nodes: []humioapi.ClusterNode{ + humioapi.ClusterNode{ + Id: 0, + }, + humioapi.ClusterNode{ + Id: 1, + }, + humioapi.ClusterNode{ + Id: 2, + }, + }}, nil, nil, nil), + }, + args{ + &corev1alpha1.HumioCluster{ + Spec: corev1alpha1.HumioClusterSpec{ + TargetReplicationFactor: 1, + }, + }, + }, + false, + false, + }, + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + c := &ClusterController{ + client: tt.fields.client, + } + got, err := c.AreStoragePartitionsBalanced(tt.args.hc) + if (err != nil) != tt.wantErr { + t.Errorf("ClusterController.AreStoragePartitionsBalanced() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("ClusterController.AreStoragePartitionsBalanced() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClusterController_RebalanceStoragePartitions(t *testing.T) { + type fields struct { + client Client + expectedPartitions *[]humioapi.StoragePartition + } + type args struct { + hc *corev1alpha1.HumioCluster + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + "test rebalancing storage partitions", + fields{NewMocklient( + humioapi.Cluster{ + StoragePartitions: []humioapi.StoragePartition{ + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{0}, + }, + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{0}, + }, + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{0}, + }, + }, + Nodes: []humioapi.ClusterNode{ + humioapi.ClusterNode{ + Id: 0, + }, + humioapi.ClusterNode{ + Id: 1, + }, + humioapi.ClusterNode{ + Id: 2, + }, + }}, nil, nil, nil), + &[]humioapi.StoragePartition{ + humioapi.StoragePartition{ + Id: 0, + NodeIds: []int{0, 1}, + }, + humioapi.StoragePartition{ + Id: 1, + NodeIds: []int{1, 2}, + }, + humioapi.StoragePartition{ + Id: 2, + NodeIds: []int{2, 0}, + }, + }, + }, + args{ + &corev1alpha1.HumioCluster{ + Spec: corev1alpha1.HumioClusterSpec{ + TargetReplicationFactor: 2, + StoragePartitionsCount: 3, + }, + }, + }, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &ClusterController{ + client: tt.fields.client, + } + if err := c.RebalanceStoragePartitions(tt.args.hc); (err != nil) != tt.wantErr { + t.Errorf("ClusterController.RebalanceStoragePartitions() error = %v, wantErr %v", err, tt.wantErr) + } + if sps, _ := c.client.GetStoragePartitions(); !reflect.DeepEqual(*sps, *tt.fields.expectedPartitions) { + t.Errorf("ClusterController.GetStoragePartitions() expected = %v, want %v", *tt.fields.expectedPartitions, *sps) + } + }) + } +} + +func TestClusterController_AreIngestPartitionsBalanced(t *testing.T) { + type fields struct { + client Client + } + type args struct { + hc *corev1alpha1.HumioCluster + } + tests := []struct { + name string + fields fields + args args + want bool + wantErr bool + }{ + { + "test ingest partitions are balanced", + fields{NewMocklient( + humioapi.Cluster{ + IngestPartitions: []humioapi.IngestPartition{ + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{0}, + }, + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{1}, + }, + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{2}, + }, + }, + Nodes: []humioapi.ClusterNode{ + humioapi.ClusterNode{ + Id: 0, + }, + humioapi.ClusterNode{ + Id: 1, + }, + humioapi.ClusterNode{ + Id: 2, + }, + }}, nil, nil, nil), + }, + args{ + &corev1alpha1.HumioCluster{ + Spec: corev1alpha1.HumioClusterSpec{ + TargetReplicationFactor: 1, + }, + }, + }, + true, + false, + }, + { + "test ingest partitions do no equal the target replication factor", + fields{NewMocklient( + humioapi.Cluster{ + IngestPartitions: []humioapi.IngestPartition{ + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{0, 1}, + }, + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{1, 2}, + }, + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{2, 0}, + }, + }, + Nodes: []humioapi.ClusterNode{ + humioapi.ClusterNode{ + Id: 
0, + }, + humioapi.ClusterNode{ + Id: 1, + }, + humioapi.ClusterNode{ + Id: 2, + }, + }}, nil, nil, nil), + }, + args{ + &corev1alpha1.HumioCluster{ + Spec: corev1alpha1.HumioClusterSpec{ + TargetReplicationFactor: 1, + }, + }, + }, + false, + false, + }, + { + "test ingest partitions are unbalanced by more than a factor of 1", + fields{NewMocklient( + humioapi.Cluster{ + IngestPartitions: []humioapi.IngestPartition{ + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{0, 0, 0}, + }, + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{1, 1, 1}, + }, + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{2, 1, 1}, + }, + }, + Nodes: []humioapi.ClusterNode{ + humioapi.ClusterNode{ + Id: 0, + }, + humioapi.ClusterNode{ + Id: 1, + }, + humioapi.ClusterNode{ + Id: 2, + }, + }}, nil, nil, nil), + }, + args{ + &corev1alpha1.HumioCluster{ + Spec: corev1alpha1.HumioClusterSpec{ + TargetReplicationFactor: 3, + }, + }, + }, + false, + false, + }, + { + "test ingest partitions are not balanced", + fields{NewMocklient( + humioapi.Cluster{ + IngestPartitions: []humioapi.IngestPartition{ + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{0, 1}, + }, + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{1, 0}, + }, + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{0, 1}, + }, + }, + Nodes: []humioapi.ClusterNode{ + humioapi.ClusterNode{ + Id: 0, + }, + humioapi.ClusterNode{ + Id: 1, + }, + humioapi.ClusterNode{ + Id: 2, + }, + }}, nil, nil, nil), + }, + args{ + &corev1alpha1.HumioCluster{ + Spec: corev1alpha1.HumioClusterSpec{ + TargetReplicationFactor: 1, + }, + }, + }, + false, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &ClusterController{ + client: tt.fields.client, + } + got, err := c.AreIngestPartitionsBalanced(tt.args.hc) + if (err != nil) != tt.wantErr { + t.Errorf("ClusterController.AreIngestPartitionsBalanced() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("ClusterController.AreIngestPartitionsBalanced() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClusterController_RebalanceIngestPartitions(t *testing.T) { + type fields struct { + client Client + expectedPartitions *[]humioapi.IngestPartition + } + type args struct { + hc *corev1alpha1.HumioCluster + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + "test rebalancing ingest partitions", + fields{NewMocklient( + humioapi.Cluster{ + IngestPartitions: []humioapi.IngestPartition{ + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{0}, + }, + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{0}, + }, + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{0}, + }, + }, + Nodes: []humioapi.ClusterNode{ + humioapi.ClusterNode{ + Id: 0, + }, + humioapi.ClusterNode{ + Id: 1, + }, + humioapi.ClusterNode{ + Id: 2, + }, + }}, nil, nil, nil), + &[]humioapi.IngestPartition{ + humioapi.IngestPartition{ + Id: 0, + NodeIds: []int{0, 1}, + }, + humioapi.IngestPartition{ + Id: 1, + NodeIds: []int{1, 2}, + }, + humioapi.IngestPartition{ + Id: 2, + NodeIds: []int{2, 0}, + }, + }, + }, + args{ + &corev1alpha1.HumioCluster{ + Spec: corev1alpha1.HumioClusterSpec{ + TargetReplicationFactor: 2, + DigestPartitionsCount: 3, + }, + }, + }, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &ClusterController{ + client: tt.fields.client, + } + if err := c.RebalanceIngestPartitions(tt.args.hc); (err != nil) != tt.wantErr { + t.Errorf("ClusterController.RebalanceIngestPartitions() 
error = %v, wantErr %v", err, tt.wantErr) + } + if sps, _ := c.client.GetIngestPartitions(); !reflect.DeepEqual(*sps, *tt.fields.expectedPartitions) { + t.Errorf("ClusterController.GetIngestPartitions() expected = %v, want %v", *tt.fields.expectedPartitions, *sps) + } + }) + } +} diff --git a/pkg/kubernetes/client.go b/pkg/kubernetes/client.go new file mode 100644 index 000000000..1174e4182 --- /dev/null +++ b/pkg/kubernetes/client.go @@ -0,0 +1,61 @@ +package kubernetes + +import ( + "context" + "fmt" + "net/http" + + corev1alpha1 "github.com/humio/humio-operator/pkg/apis/core/v1alpha1" + "github.com/prometheus/common/log" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ListPods returns the list of all pods associated with an instance of HumioCluster +func ListPods(c client.Client, hc *corev1alpha1.HumioCluster) ([]corev1.Pod, error) { + var foundPodList corev1.PodList + matchingLabels := client.MatchingLabels{ + "humio_cr": hc.Name, + } + // client.MatchingField also exists? + + err := c.List(context.TODO(), &foundPodList, client.InNamespace(hc.Namespace), matchingLabels) + if err != nil { + return nil, err + } + + return foundPodList.Items, nil +} + +// GetHumioBaseURL returns the base URL of the first Humio node it can reach +func GetHumioBaseURL(c client.Client, hc *corev1alpha1.HumioCluster) (string, error) { + allPodsForCluster, err := ListPods(c, hc) + if err != nil { + return "", fmt.Errorf("could not list pods for cluster: %v", err) + } + for _, p := range allPodsForCluster { + if p.DeletionTimestamp == nil { + // only consider pods not being deleted + + if p.Status.PodIP == "" { + // skip pods with no pod IP + continue + } + + // check if we can reach the humio endpoint + humioBaseURL := "http://" + p.Status.PodIP + ":8080/" + resp, err := http.Get(humioBaseURL) + if err != nil { + log.Info(fmt.Sprintf("Humio API is unavailable: %v", err)) + continue + } + defer resp.Body.Close() + + // if request was ok, return the base URL + if resp.StatusCode == http.StatusOK { + return humioBaseURL, nil + } + } + } + return "", fmt.Errorf("did not find a valid base URL") +}
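For reviewers skimming the diff above: generateStoragePartitionSchemeCandidate and generateIngestPartitionSchemeCandidate both build their layout round-robin, giving partition p the node IDs at offsets p through p+replicas-1 (wrapping around the node list) and capping the replication factor at the number of nodes. Below is a minimal standalone sketch of that scheme; the helper name roundRobinAssign is illustrative only and not part of this change, and the zero-replication error path is omitted for brevity. With nodes 0..2, 3 partitions, and replication 2 it produces [[0 1] [1 2] [2 0]], which matches the expected partitions in the rebalance tests above.

package main

import "fmt"

// roundRobinAssign sketches the candidate generation used in the diff:
// partition p receives `replicas` node IDs starting at offset p and
// wrapping around the node list; replication is capped at the node count.
func roundRobinAssign(nodeIDs []int, partitionCount, targetReplication int) [][]int {
	replicas := targetReplication
	if replicas > len(nodeIDs) {
		replicas = len(nodeIDs)
	}
	assignments := make([][]int, partitionCount)
	for p := 0; p < partitionCount; p++ {
		for r := 0; r < replicas; r++ {
			assignments[p] = append(assignments[p], nodeIDs[(p+r)%len(nodeIDs)])
		}
	}
	return assignments
}

func main() {
	// Same shape as the rebalance tests: 3 partitions, replication 2, nodes 0..2.
	fmt.Println(roundRobinAssign([]int{0, 1, 2}, 3, 2)) // [[0 1] [1 2] [2 0]]
}

Capping replicas at len(nodeIDs) mirrors the guard in the diff, so a small cluster is never asked for more replicas of a partition than it has nodes.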
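The AreStoragePartitionsBalanced and AreIngestPartitionsBalanced checks in the diff reduce to three conditions: every partition lists exactly TargetReplicationFactor node IDs, every registered node holds at least one partition, and the per-node partition counts differ by at most one. A compact restatement as a standalone predicate follows; isBalanced is a hypothetical helper, not part of the PR, and it keys the counts by node ID, whereas the code in the diff indexes by position and so assumes node IDs run 0..n-1.

// isBalanced restates the balance check from the diff: each partition must
// carry exactly `replication` node IDs, every node must appear at least once,
// and the spread between the most and least loaded node must be at most 1.
// Example: isBalanced([][]int{{0, 1}, {1, 2}, {2, 0}}, []int{0, 1, 2}, 2) == true.
func isBalanced(partitions [][]int, nodeIDs []int, replication int) bool {
	counts := make(map[int]int, len(nodeIDs))
	for _, id := range nodeIDs {
		counts[id] = 0
	}
	for _, p := range partitions {
		if len(p) != replication {
			return false // target replication factor not met
		}
		for _, id := range p {
			counts[id]++
		}
	}
	min, max := -1, -1
	for _, id := range nodeIDs {
		c := counts[id]
		if c == 0 {
			return false // a registered node holds no partitions
		}
		if min == -1 || c < min {
			min = c
		}
		if c > max {
			max = c
		}
	}
	return max-min <= 1
}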