From cd46b038b83bf2c0a9a7da78430707058a4500b9 Mon Sep 17 00:00:00 2001 From: Minoru Osuka Date: Mon, 5 Aug 2019 00:28:39 +0900 Subject: [PATCH] Change protobuf for indexer and dispatcher --- README.md | 84 +- cmd/blast/dispatcher_node_health.go | 41 +- cmd/blast/indexer_cluster_info.go | 2 +- cmd/blast/indexer_cluster_leave.go | 5 +- cmd/blast/indexer_cluster_watch.go | 29 +- cmd/blast/indexer_node_health.go | 40 +- cmd/blast/indexer_node_info.go | 18 +- cmd/blast/indexer_start.go | 33 +- cmd/blast/main.go | 105 +- cmd/blast/manager_cluster_leave.go | 3 +- cmd/blast/manager_node_info.go | 6 +- cmd/blast/manager_watch.go | 2 +- dispatcher/grpc_client.go | 25 +- dispatcher/grpc_service.go | 425 ++++--- dispatcher/server_test.go | 378 +++--- indexer/grpc_client.go | 76 +- indexer/grpc_service.go | 666 ++++++----- indexer/index.go | 3 +- indexer/raft_fsm.go | 79 +- indexer/raft_server.go | 199 ++-- indexer/server.go | 104 +- indexer/server_test.go | 1599 ++++++++++++++------------ manager/grpc_client.go | 10 + manager/grpc_service.go | 15 +- manager/raft_fsm.go | 4 +- manager/raft_server.go | 7 +- manager/server_test.go | 85 +- protobuf/distribute/distribute.pb.go | 297 +++-- protobuf/distribute/distribute.proto | 29 +- protobuf/index/index.pb.go | 934 +++++++++------ protobuf/index/index.proto | 90 +- 31 files changed, 2912 insertions(+), 2481 deletions(-) diff --git a/README.md b/README.md index 9012660..e25d40d 100644 --- a/README.md +++ b/README.md @@ -273,15 +273,13 @@ You can see the result in JSON format. The result of the above command is: ```json { - "node_config": { - "bind_addr": ":2000", - "data_dir": "/tmp/blast/indexer1", - "grpc_addr": ":5000", - "http_addr": ":8000", - "node_id": "indexer1", - "raft_storage_type": "boltdb" - }, - "state": "Leader" + "id": "indexer1", + "bind_address": ":2000", + "state": 3, + "metadata": { + "grpc_address": ":5000", + "http_address": ":8000" + } } ``` @@ -684,38 +682,34 @@ You can see the result in JSON format. The result of the above command is: ```json { - "indexer1": { - "node_config": { - "bind_addr": ":2000", - "data_dir": "/tmp/blast/indexer1", - "grpc_addr": ":5000", - "http_addr": ":8000", - "node_id": "indexer1", - "raft_storage_type": "boltdb" - }, - "state": "Leader" - }, - "indexer2": { - "node_config": { - "bind_addr": ":2010", - "data_dir": "/tmp/blast/indexer2", - "grpc_addr": ":5010", - "http_addr": ":8010", - "node_id": "indexer2", - "raft_storage_type": "boltdb" + "nodes": { + "indexer1": { + "id": "indexer1", + "bind_address": ":2000", + "state": 3, + "metadata": { + "grpc_address": ":5000", + "http_address": ":8000" + } }, - "state": "Follower" - }, - "indexer3": { - "node_config": { - "bind_addr": ":2020", - "data_dir": "/tmp/blast/indexer3", - "grpc_addr": ":5020", - "http_addr": ":8020", - "node_id": "indexer3", - "raft_storage_type": "boltdb" + "indexer2": { + "id": "indexer2", + "bind_address": ":2010", + "state": 1, + "metadata": { + "grpc_address": ":5010", + "http_address": ":8010" + } }, - "state": "Follower" + "indexer3": { + "id": "indexer3", + "bind_address": ":2020", + "state": 1, + "metadata": { + "grpc_address": ":5020", + "http_address": ":8020" + } + } } } ``` @@ -786,9 +780,9 @@ Manager can also bring up a cluster like an indexer. 
Specify a common index mapp $ ./bin/blast manager start \ --grpc-address=:5100 \ --http-address=:8100 \ - --node-id=cluster1 \ + --node-id=manager1 \ --node-address=:2100 \ - --data-dir=/tmp/blast/cluster1 \ + --data-dir=/tmp/blast/manager1 \ --raft-storage-type=boltdb \ --index-mapping-file=./example/wiki_index_mapping.json \ --index-type=upside_down \ @@ -798,18 +792,18 @@ $ ./bin/blast manager start \ --peer-grpc-address=:5100 \ --grpc-address=:5110 \ --http-address=:8110 \ - --node-id=cluster2 \ + --node-id=manager2 \ --node-address=:2110 \ - --data-dir=/tmp/blast/cluster2 \ + --data-dir=/tmp/blast/manager2 \ --raft-storage-type=boltdb $ ./bin/blast manager start \ --peer-grpc-address=:5100 \ --grpc-address=:5120 \ --http-address=:8120 \ - --node-id=cluster3 \ + --node-id=manager3 \ --node-address=:2120 \ - --data-dir=/tmp/blast/cluster3 \ + --data-dir=/tmp/blast/manager3 \ --raft-storage-type=boltdb ``` diff --git a/cmd/blast/dispatcher_node_health.go b/cmd/blast/dispatcher_node_health.go index 698473e..5fb1b8f 100644 --- a/cmd/blast/dispatcher_node_health.go +++ b/cmd/blast/dispatcher_node_health.go @@ -18,12 +18,15 @@ import ( "fmt" "os" + "github.com/mosuka/blast/protobuf/distribute" + "github.com/mosuka/blast/dispatcher" "github.com/urfave/cli" ) func dispatcherNodeHealth(c *cli.Context) error { grpcAddr := c.String("grpc-address") + healthiness := c.Bool("healthiness") liveness := c.Bool("liveness") readiness := c.Bool("readiness") @@ -38,34 +41,30 @@ func dispatcherNodeHealth(c *cli.Context) error { } }() - if !liveness && !readiness { - LivenessState, err := client.LivenessProbe() + var state string + if healthiness { + state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { - return err + state = distribute.NodeHealthCheckResponse_UNHEALTHY.String() } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", LivenessState)) - - readinessState, err := client.ReadinessProbe() + } else if liveness { + state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { - return err + state = distribute.NodeHealthCheckResponse_DEAD.String() } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", readinessState)) - } else { - if liveness { - state, err := client.LivenessProbe() - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } else if readiness { + state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_READINESS.String()) + if err != nil { + state = distribute.NodeHealthCheckResponse_NOT_READY.String() } - if readiness { - state, err := client.ReadinessProbe() - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } else { + state, err = client.NodeHealthCheck(distribute.NodeHealthCheckRequest_HEALTHINESS.String()) + if err != nil { + state = distribute.NodeHealthCheckResponse_UNHEALTHY.String() } } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + return nil } diff --git a/cmd/blast/indexer_cluster_info.go b/cmd/blast/indexer_cluster_info.go index 3e8f1d8..434c011 100644 --- a/cmd/blast/indexer_cluster_info.go +++ b/cmd/blast/indexer_cluster_info.go @@ -37,7 +37,7 @@ func indexerClusterInfo(c *cli.Context) error { } }() - cluster, err := client.GetCluster() + cluster, err := client.ClusterInfo() if err != nil { return err } diff --git a/cmd/blast/indexer_cluster_leave.go b/cmd/blast/indexer_cluster_leave.go index b0be2d9..e564256 100644 --- a/cmd/blast/indexer_cluster_leave.go +++ 
b/cmd/blast/indexer_cluster_leave.go @@ -33,10 +33,9 @@ func indexerClusterLeave(c *cli.Context) error { // get grpc address of leader node } - grpcAddr := c.String("grpc-address") nodeId := c.String("node-id") - client, err := indexer.NewGRPCClient(grpcAddr) + client, err := indexer.NewGRPCClient(peerGrpcAddr) if err != nil { return err } @@ -47,7 +46,7 @@ func indexerClusterLeave(c *cli.Context) error { } }() - err = client.DeleteNode(nodeId) + err = client.ClusterLeave(nodeId) if err != nil { return err } diff --git a/cmd/blast/indexer_cluster_watch.go b/cmd/blast/indexer_cluster_watch.go index 1a5097f..ba99bdb 100644 --- a/cmd/blast/indexer_cluster_watch.go +++ b/cmd/blast/indexer_cluster_watch.go @@ -16,14 +16,13 @@ package main import ( "encoding/json" - "errors" "fmt" "io" "log" "os" "github.com/mosuka/blast/indexer" - "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) @@ -41,12 +40,22 @@ func indexerClusterWatch(c *cli.Context) error { } }() - err = indexerClusterInfo(c) + cluster, err := client.ClusterInfo() if err != nil { return err } + resp := &index.ClusterWatchResponse{ + Event: 0, + Node: nil, + Cluster: cluster, + } + clusterBytes, err := json.MarshalIndent(resp, "", " ") + if err != nil { + return err + } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(clusterBytes))) - watchClient, err := client.WatchCluster() + watchClient, err := client.ClusterWatch() if err != nil { return err } @@ -61,17 +70,7 @@ func indexerClusterWatch(c *cli.Context) error { break } - cluster, err := protobuf.MarshalAny(resp.Cluster) - if err != nil { - return err - } - if cluster == nil { - return errors.New("nil") - } - - var clusterBytes []byte - clusterMap := *cluster.(*map[string]interface{}) - clusterBytes, err = json.MarshalIndent(clusterMap, "", " ") + clusterBytes, err = json.MarshalIndent(resp, "", " ") if err != nil { return err } diff --git a/cmd/blast/indexer_node_health.go b/cmd/blast/indexer_node_health.go index beab1c0..aedb6eb 100644 --- a/cmd/blast/indexer_node_health.go +++ b/cmd/blast/indexer_node_health.go @@ -19,11 +19,13 @@ import ( "os" "github.com/mosuka/blast/indexer" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) func indexerNodeHealth(c *cli.Context) error { grpcAddr := c.String("grpc-address") + healthiness := c.Bool("healthiness") liveness := c.Bool("liveness") readiness := c.Bool("readiness") @@ -38,34 +40,30 @@ func indexerNodeHealth(c *cli.Context) error { } }() - if !liveness && !readiness { - LivenessState, err := client.LivenessProbe() + var state string + if healthiness { + state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { - return err + state = index.NodeHealthCheckResponse_UNHEALTHY.String() } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", LivenessState)) - - readinessState, err := client.ReadinessProbe() + } else if liveness { + state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { - return err + state = index.NodeHealthCheckResponse_DEAD.String() } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", readinessState)) - } else { - if liveness { - state, err := client.LivenessProbe() - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } else if readiness { + state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) + if err != nil { + state = index.NodeHealthCheckResponse_NOT_READY.String() } - if readiness 
{ - state, err := client.ReadinessProbe() - if err != nil { - return err - } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + } else { + state, err = client.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) + if err != nil { + state = index.NodeHealthCheckResponse_UNHEALTHY.String() } } + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", state)) + return nil } diff --git a/cmd/blast/indexer_node_info.go b/cmd/blast/indexer_node_info.go index ce35cd1..0ab3ad5 100644 --- a/cmd/blast/indexer_node_info.go +++ b/cmd/blast/indexer_node_info.go @@ -24,20 +24,8 @@ import ( ) func indexerNodeInfo(c *cli.Context) error { - clusterGrpcAddr := c.String("cluster-grpc-address") - shardId := c.String("shard-id") - peerGrpcAddr := c.String("peer-grpc-address") - - if clusterGrpcAddr != "" && shardId != "" { - - } else if peerGrpcAddr != "" { - - } - grpcAddr := c.String("grpc-address") - nodeId := c.Args().Get(0) - client, err := indexer.NewGRPCClient(grpcAddr) if err != nil { return err @@ -49,17 +37,17 @@ func indexerNodeInfo(c *cli.Context) error { } }() - metadata, err := client.GetNode(nodeId) + node, err := client.NodeInfo() if err != nil { return err } - metadataBytes, err := json.MarshalIndent(metadata, "", " ") + nodeBytes, err := json.MarshalIndent(node, "", " ") if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(metadataBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(nodeBytes))) return nil } diff --git a/cmd/blast/indexer_start.go b/cmd/blast/indexer_start.go index a716efe..0afd811 100644 --- a/cmd/blast/indexer_start.go +++ b/cmd/blast/indexer_start.go @@ -24,11 +24,12 @@ import ( "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" + "github.com/mosuka/blast/protobuf/index" "github.com/urfave/cli" ) func indexerStart(c *cli.Context) error { - clusterGRPCAddr := c.String("manager-grpc-address") + managerGRPCAddr := c.String("manager-grpc-address") shardId := c.String("shard-id") peerGRPCAddr := c.String("peer-grpc-address") @@ -93,26 +94,14 @@ func indexerStart(c *cli.Context) error { httpLogCompress, ) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - if clusterGRPCAddr != "" { - clusterConfig.ManagerAddr = clusterGRPCAddr - } - if shardId != "" { - clusterConfig.ClusterId = shardId - } - if peerGRPCAddr != "" { - clusterConfig.PeerAddr = peerGRPCAddr - } - - // create node config - nodeConfig := &config.NodeConfig{ - NodeId: nodeId, - BindAddr: nodeAddr, - GRPCAddr: grpcAddr, - HTTPAddr: httpAddr, - DataDir: dataDir, - RaftStorageType: raftStorageType, + node := &index.Node{ + Id: nodeId, + BindAddress: nodeAddr, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddr, + HttpAddress: httpAddr, + }, } var err error @@ -135,7 +124,7 @@ func indexerStart(c *cli.Context) error { IndexStorageType: indexStorageType, } - svr, err := indexer.NewServer(clusterConfig, nodeConfig, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpAccessLogger) + svr, err := indexer.NewServer(managerGRPCAddr, shardId, peerGRPCAddr, node, dataDir, raftStorageType, indexConfig, logger.Named(nodeId), grpcLogger.Named(nodeId), httpAccessLogger) if err != nil { return err } diff --git a/cmd/blast/main.go b/cmd/blast/main.go index 1997d3f..e889f32 100644 --- a/cmd/blast/main.go +++ b/cmd/blast/main.go @@ -557,43 +557,27 @@ func main() { Name: "info", Usage: "Get node information", Flags: []cli.Flag{ - //cli.StringFlag{ - // 
Name: "cluster-grpc-address", - // Value: "", - // Usage: "The gRPC address of the cluster in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "shard-id", - // Value: "", - // Usage: "Shard ID registered in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "peer-grpc-address", - // Value: "", - // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "node-id", - // Value: "", - // Usage: "The node ID for which to retrieve the node information", - //}, cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC address of the node for which to retrieve the node information", }, }, Action: indexerNodeInfo, }, { - Name: "health", + Name: "healthcheck", Usage: "Health check the node", Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC listen address", }, + cli.BoolFlag{ + Name: "healthiness", + Usage: "healthiness probe", + }, cli.BoolFlag{ Name: "liveness", Usage: "Liveness probe", @@ -615,29 +599,9 @@ func main() { Name: "info", Usage: "Get cluster information", Flags: []cli.Flag{ - //cli.StringFlag{ - // Name: "cluster-grpc-address", - // Value: "", - // Usage: "The gRPC address of the cluster in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "shard-id", - // Value: "", - // Usage: "Shard ID registered in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "peer-grpc-address", - // Value: "", - // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "node-id", - // Value: "", - // Usage: "The node ID for which to retrieve the node information", - //}, cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC address of the node for which to retrieve the node information", }, }, @@ -647,29 +611,9 @@ func main() { Name: "watch", Usage: "Watch cluster", Flags: []cli.Flag{ - //cli.StringFlag{ - // Name: "cluster-grpc-address", - // Value: "", - // Usage: "The gRPC address of the cluster in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "shard-id", - // Value: "", - // Usage: "Shard ID registered in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "peer-grpc-address", - // Value: "", - // Usage: "The gRPC address of the peer node in which the target node for retrieving the information is joining", - //}, - //cli.StringFlag{ - // Name: "node-id", - // Value: "", - // Usage: "The node ID for which to retrieve the node information", - //}, cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC address of the node for which to retrieve the node information", }, }, @@ -694,11 +638,6 @@ func main() { Value: "", Usage: "The gRPC address of the peer node that exists in the cluster to be joined", }, - cli.StringFlag{ - Name: "grpc-address", - Value: "", - Usage: "The gRPC listen address", - }, cli.StringFlag{ Name: "node-id", Value: "", @@ -715,7 +654,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -733,7 +672,7 @@ func main() { Flags: 
[]cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -755,7 +694,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -773,7 +712,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -791,7 +730,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5000", Usage: "The gRPC listen address", }, }, @@ -932,14 +871,18 @@ func main() { Usage: "Command for blast dispatcher node", Subcommands: []cli.Command{ { - Name: "health", + Name: "healthcheck", Usage: "Health check the node", Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5200", Usage: "The gRPC listen address", }, + cli.BoolFlag{ + Name: "healthiness", + Usage: "healthiness probe", + }, cli.BoolFlag{ Name: "liveness", Usage: "Liveness probe", @@ -959,7 +902,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5200", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -977,7 +920,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5200", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -999,7 +942,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5200", Usage: "The gRPC listen address", }, cli.StringFlag{ @@ -1017,7 +960,7 @@ func main() { Flags: []cli.Flag{ cli.StringFlag{ Name: "grpc-address", - Value: "", + Value: ":5200", Usage: "The gRPC listen address", }, cli.StringFlag{ diff --git a/cmd/blast/manager_cluster_leave.go b/cmd/blast/manager_cluster_leave.go index b50a277..408f0ec 100644 --- a/cmd/blast/manager_cluster_leave.go +++ b/cmd/blast/manager_cluster_leave.go @@ -29,10 +29,9 @@ func managerClusterLeave(c *cli.Context) error { // get grpc address of leader node } - grpcAddr := c.String("grpc-address") nodeId := c.String("node-id") - client, err := manager.NewGRPCClient(grpcAddr) + client, err := manager.NewGRPCClient(peerGrpcAddr) if err != nil { return err } diff --git a/cmd/blast/manager_node_info.go b/cmd/blast/manager_node_info.go index 55f0e1d..85314a2 100644 --- a/cmd/blast/manager_node_info.go +++ b/cmd/blast/manager_node_info.go @@ -37,17 +37,17 @@ func managerNodeInfo(c *cli.Context) error { } }() - metadata, err := client.NodeInfo() + node, err := client.NodeInfo() if err != nil { return err } - metadataBytes, err := json.MarshalIndent(metadata, "", " ") + nodeBytes, err := json.MarshalIndent(node, "", " ") if err != nil { return err } - _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(metadataBytes))) + _, _ = fmt.Fprintln(os.Stdout, fmt.Sprintf("%v", string(nodeBytes))) return nil } diff --git a/cmd/blast/manager_watch.go b/cmd/blast/manager_watch.go index 273927b..bab09af 100644 --- a/cmd/blast/manager_watch.go +++ b/cmd/blast/manager_watch.go @@ -70,7 +70,7 @@ func managerWatch(c *cli.Context) error { switch value.(type) { case *map[string]interface{}: valueMap := *value.(*map[string]interface{}) - valueBytes, err = json.MarshalIndent(valueMap, "", " ") + valueBytes, err = json.Marshal(valueMap) if err != nil { return err } diff --git a/dispatcher/grpc_client.go b/dispatcher/grpc_client.go index a042b07..b1e4820 100644 --- a/dispatcher/grpc_client.go +++ 
b/dispatcher/grpc_client.go @@ -21,7 +21,6 @@ import ( "github.com/blevesearch/bleve" "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/protobuf" @@ -97,23 +96,25 @@ func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -func (c *GRPCClient) LivenessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.LivenessProbe(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) +func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (string, error) { + req := &distribute.NodeHealthCheckRequest{} - return distribute.LivenessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + switch probe { + case distribute.NodeHealthCheckRequest_HEALTHINESS.String(): + req.Probe = distribute.NodeHealthCheckRequest_HEALTHINESS + case distribute.NodeHealthCheckRequest_LIVENESS.String(): + req.Probe = distribute.NodeHealthCheckRequest_LIVENESS + case distribute.NodeHealthCheckRequest_READINESS.String(): + req.Probe = distribute.NodeHealthCheckRequest_READINESS + default: + req.Probe = distribute.NodeHealthCheckRequest_HEALTHINESS } - return resp.State.String(), nil -} - -func (c *GRPCClient) ReadinessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.ReadinessProbe(c.ctx, &empty.Empty{}) + resp, err := c.client.NodeHealthCheck(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) - return distribute.ReadinessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + return distribute.NodeHealthCheckResponse_UNHEALTHY.String(), errors.New(st.Message()) } return resp.State.String(), nil diff --git a/dispatcher/grpc_service.go b/dispatcher/grpc_service.go index ed5ca8d..f51c94c 100644 --- a/dispatcher/grpc_service.go +++ b/dispatcher/grpc_service.go @@ -16,19 +16,20 @@ package dispatcher import ( "context" + "encoding/json" "errors" "hash/fnv" "io" "math/rand" - "reflect" "sort" "sync" "time" + "github.com/mosuka/blast/protobuf/index" + "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/search" "github.com/golang/protobuf/ptypes/any" - "github.com/golang/protobuf/ptypes/empty" "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/manager" @@ -42,34 +43,51 @@ import ( ) type GRPCService struct { - managerAddr string - logger *zap.Logger + managerGrpcAddress string + logger *zap.Logger managers *management.Cluster managerClients map[string]*manager.GRPCClient updateManagersStopCh chan struct{} updateManagersDoneCh chan struct{} - indexers map[string]interface{} + //indexers map[string]interface{} + indexers map[string]*index.Cluster indexerClients map[string]map[string]*indexer.GRPCClient updateIndexersStopCh chan struct{} updateIndexersDoneCh chan struct{} } -func NewGRPCService(managerAddr string, logger *zap.Logger) (*GRPCService, error) { +func NewGRPCService(managerGrpcAddress string, logger *zap.Logger) (*GRPCService, error) { return &GRPCService{ - managerAddr: managerAddr, - logger: logger, + managerGrpcAddress: managerGrpcAddress, + logger: logger, managers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, managerClients: make(map[string]*manager.GRPCClient, 0), - indexers: make(map[string]interface{}, 0), + //indexers: make(map[string]interface{}, 0), + indexers: make(map[string]*index.Cluster, 0), indexerClients: make(map[string]map[string]*indexer.GRPCClient, 0), }, nil } func (s 
*GRPCService) Start() error { + var err error + s.managers, err = s.getManagerCluster(s.managerGrpcAddress) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + + for id, node := range s.managers.Nodes { + client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) + if err != nil { + s.logger.Fatal(err.Error(), zap.String("id", id), zap.String("grpc_address", s.managerGrpcAddress)) + } + s.managerClients[node.Id] = client + } + s.logger.Info("start to update manager cluster info") go s.startUpdateManagers(500 * time.Millisecond) @@ -80,12 +98,12 @@ func (s *GRPCService) Start() error { } func (s *GRPCService) Stop() error { - s.logger.Info("stop to update manager cluster info") - s.stopUpdateManagers() - s.logger.Info("stop to update indexer cluster info") s.stopUpdateIndexers() + s.logger.Info("stop to update manager cluster info") + s.stopUpdateManagers() + return nil } @@ -117,8 +135,8 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { return nil, err } -func (s *GRPCService) getInitialManagers(managerAddr string) (*management.Cluster, error) { - client, err := manager.NewGRPCClient(s.managerAddr) +func (s *GRPCService) getManagerCluster(managerAddr string) (*management.Cluster, error) { + client, err := manager.NewGRPCClient(managerAddr) defer func() { err := client.Close() if err != nil { @@ -140,6 +158,21 @@ func (s *GRPCService) getInitialManagers(managerAddr string) (*management.Cluste return managers, nil } +func (s *GRPCService) cloneManagerCluster(cluster *management.Cluster) (*management.Cluster, error) { + b, err := json.Marshal(cluster) + if err != nil { + return nil, err + } + + var clone *management.Cluster + err = json.Unmarshal(b, &clone) + if err != nil { + return nil, err + } + + return clone, nil +} + func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.updateManagersStopCh = make(chan struct{}) s.updateManagersDoneCh = make(chan struct{}) @@ -148,51 +181,20 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { close(s.updateManagersDoneCh) }() - var err error - - // get initial managers - s.managers, err = s.getInitialManagers(s.managerAddr) - if err != nil { - s.logger.Error(err.Error()) - return - } - s.logger.Debug("initialize manager list", zap.Any("managers", s.managers)) - - // create clients for managers - for nodeId, node := range s.managers.Nodes { - if node.Metadata == nil { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId)) - continue - } - - if node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - } - if client != nil { - s.managerClients[nodeId] = client - } - } - for { select { case <-s.updateManagersStopCh: s.logger.Info("received a request to stop updating a manager cluster") return default: + // get client for manager from the list client, err := s.getManagerClient() if err != nil { s.logger.Error(err.Error()) continue } - // create stream + // create stream for watching cluster changes stream, err := client.ClusterWatch() if err != nil { s.logger.Error(err.Error()) @@ -209,80 +211,77 @@ func (s *GRPCService) 
startUpdateManagers(checkInterval time.Duration) { s.logger.Error(err.Error()) continue } - managers := resp.Cluster - - if !reflect.DeepEqual(s.managers, managers) { - // open clients - for nodeId, node := range managers.Nodes { - if node.Metadata == nil { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId)) - continue + s.logger.Info("cluster has changed", zap.Any("resp", resp)) + switch resp.Event { + case management.ClusterWatchResponse_JOIN, management.ClusterWatchResponse_UPDATE: + // add to cluster nodes + s.managers.Nodes[resp.Node.Id] = resp.Node + + // check node state + switch resp.Node.State { + case management.Node_UNKNOWN, management.Node_SHUTDOWN: + // close client + if client, exist := s.managerClients[resp.Node.Id]; exist { + s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) + } + delete(s.managerClients, resp.Node.Id) } - - if node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + default: // management.Node_FOLLOWER, management.Node_CANDIDATE, management.Node_LEADER + if resp.Node.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) continue } - client, exist := s.managerClients[nodeId] - if exist { - s.logger.Debug("client has already exist in manager list", zap.String("node_id", nodeId)) - - if client.GetAddress() != node.Metadata.GrpcAddress { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - - delete(s.managerClients, nodeId) - + // check client that already exist in the client list + if client, exist := s.managerClients[resp.Node.Id]; !exist { + // create new client + s.logger.Info("create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + continue + } + s.managerClients[resp.Node.Id] = newClient + } else { + if client.GetAddress() != resp.Node.Metadata.GrpcAddress { + // close client + s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) err = client.Close() if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId)) + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) } + delete(s.managerClients, resp.Node.Id) - newClient, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) + // re-create new client + s.logger.Info("re-create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + continue } - - if newClient != nil { 
- s.managerClients[nodeId] = newClient - } - } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - } - } else { - s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - newClient, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - } - if newClient != nil { - s.managerClients[nodeId] = newClient + s.managerClients[resp.Node.Id] = newClient } } } - - // close nonexistent clients - for nodeId, client := range s.managerClients { - if nodeConfig, exist := managers.Nodes[nodeId]; !exist { - s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - - s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) - } - - s.logger.Debug("delete client", zap.String("node_id", nodeId)) - delete(s.managerClients, nodeId) + case management.ClusterWatchResponse_LEAVE: + if client, exist := s.managerClients[resp.Node.Id]; exist { + s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) } + delete(s.managerClients, resp.Node.Id) } - // keep current manager cluster - s.managers = managers - s.logger.Debug("managers", zap.Any("managers", s.managers)) + if _, exist := s.managers.Nodes[resp.Node.Id]; exist { + delete(s.managers.Nodes, resp.Node.Id) + } + default: + s.logger.Debug("unknown event", zap.Any("event", resp.Event)) + continue } } } @@ -316,16 +315,6 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { close(s.updateIndexersDoneCh) }() - // wait for manager available - s.logger.Info("wait for manager clients are available") - for { - if len(s.managerClients) > 0 { - s.logger.Info("manager clients are available") - break - } - time.Sleep(100 * time.Millisecond) - } - // get active client for manager client, err := s.getManagerClient() if err != nil { @@ -333,57 +322,44 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { } // get initial indexers - clusters, err := client.Get("/cluster_config/clusters/") + shards, err := client.Get("/cluster/shards") if err != nil { - s.logger.Error(err.Error()) + s.logger.Fatal(err.Error()) + return } - if clusters == nil { - s.logger.Error("nil") + if shards == nil { + s.logger.Error("/cluster/shards is nil") } - s.indexers = *clusters.(*map[string]interface{}) - - // create clients for indexer - for clusterId, cluster := range s.indexers { - cm, ok := cluster.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("cluster", cm)) + for shardId, shardIntr := range *shards.(*map[string]interface{}) { + shardBytes, err := json.Marshal(shardIntr) + if err != nil { + s.logger.Error(err.Error()) continue } - nodes, ok := cm["nodes"].(map[string]interface{}) - if !ok { - 
s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("nodes", nodes)) + var shard *index.Cluster + err = json.Unmarshal(shardBytes, &shard) + if err != nil { + s.logger.Error(err.Error()) continue } - for nodeId, node := range nodes { - nm, ok := node.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId)) - continue - } - - metadata, ok := nm["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.Any("node_config", metadata)) - continue - } + s.indexers[shardId] = shard - grpcAddr, ok := metadata["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + for nodeId, node := range shard.Nodes { + if node.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("id", node.Id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue } - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - client, err := indexer.NewGRPCClient(metadata["grpc_addr"].(string)) + newClient, err := indexer.NewGRPCClient(node.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue } - if _, exist := s.indexerClients[clusterId]; !exist { - s.indexerClients[clusterId] = make(map[string]*indexer.GRPCClient) + if _, exist := s.indexerClients[shardId]; !exist { + s.indexerClients[shardId] = make(map[string]*indexer.GRPCClient) } - s.indexerClients[clusterId][nodeId] = client + s.indexerClients[shardId][nodeId] = newClient } } @@ -399,7 +375,7 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { continue } - stream, err := client.Watch("/cluster_config/clusters/") + stream, err := client.Watch("/cluster/shards/") if err != nil { s.logger.Error(err.Error()) continue @@ -414,95 +390,85 @@ func (s *GRPCService) startUpdateIndexers(checkInterval time.Duration) { s.logger.Error(err.Error()) continue } - s.logger.Debug("data has changed", zap.String("key", resp.Key)) + s.logger.Debug("data has changed", zap.Any("command", resp.Command), zap.String("key", resp.Key), zap.Any("value", resp.Value)) - cluster, err := client.Get("/cluster_config/clusters/") + shardsIntr, err := client.Get("/cluster/shards/") if err != nil { s.logger.Error(err.Error()) continue } - if cluster == nil { - s.logger.Error("nil") + if shardsIntr == nil { + s.logger.Error("/cluster/shards is nil") continue } - indexers := *cluster.(*map[string]interface{}) - - // compare previous manager with current manager - if !reflect.DeepEqual(s.indexers, indexers) { - // create clients for indexer - for clusterId, cluster := range s.indexers { - cm, ok := cluster.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("cluster", cm)) - continue - } + for shardId, shardIntr := range *shards.(*map[string]interface{}) { + shardBytes, err := json.Marshal(shardIntr) + if err != nil { + s.logger.Error(err.Error()) + continue + } - nodes, ok := cm["nodes"].(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("cluster_id", clusterId), zap.Any("nodes", nodes)) - continue - } + var shard *index.Cluster + err = json.Unmarshal(shardBytes, &shard) + if err != nil { + 
s.logger.Error(err.Error()) + continue + } - for nodeId, node := range nodes { - nm, ok := node.(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId)) - continue - } + s.indexers[shardId] = shard - nodeConfig, ok := nm["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - continue - } + if _, exist := s.indexerClients[shardId]; !exist { + s.indexerClients[shardId] = make(map[string]*indexer.GRPCClient) + } - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing gRPC address", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + // open clients for indexer nodes + for nodeId, node := range shard.Nodes { + if node.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("id", node.Id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue + } + + // check client that already exist in the client list + if client, exist := s.indexerClients[shardId][node.Id]; !exist { + // create new client + newClient, err := indexer.NewGRPCClient(node.Metadata.GrpcAddress) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) continue } - - client, exist := s.indexerClients[clusterId][nodeId] - if exist { - s.logger.Debug("client has already exist in manager list", zap.String("node_id", nodeId)) - - if client.GetAddress() != grpcAddr { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - - delete(s.indexerClients[clusterId], nodeId) - - err = client.Close() - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId)) - } - - newClient, err := indexer.NewGRPCClient(grpcAddr) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - - if newClient != nil { - s.indexerClients[clusterId][nodeId] = newClient - } + s.indexerClients[shardId][nodeId] = newClient + } else { + if client.GetAddress() != node.Metadata.GrpcAddress { + // close client + s.logger.Info("close gRPC client", zap.String("id", node.Id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", node.Id)) } + delete(s.indexerClients[shardId], node.Id) - } else { - s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - newClient, err := indexer.NewGRPCClient(nodeConfig["grpc_addr"].(string)) + // re-create new client + newClient, err := indexer.NewGRPCClient(node.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue } - if _, exist := s.indexerClients[clusterId]; !exist { - s.indexerClients[clusterId] = make(map[string]*indexer.GRPCClient) - } - s.indexerClients[clusterId][nodeId] = newClient + s.indexerClients[shardId][nodeId] = newClient } } } + // close clients for non-existent indexer nodes + for id, client := range s.indexerClients[shardId] { + 
if _, exist := s.indexers[shardId].Nodes[id]; !exist { + s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) + } + delete(s.indexerClients[shardId], id) + } + } } } } @@ -548,17 +514,16 @@ func (s *GRPCService) getIndexerClients() map[string]*indexer.GRPCClient { return indexerClients } -func (s *GRPCService) LivenessProbe(ctx context.Context, req *empty.Empty) (*distribute.LivenessProbeResponse, error) { - resp := &distribute.LivenessProbeResponse{ - State: distribute.LivenessProbeResponse_ALIVE, - } - - return resp, nil -} +func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *distribute.NodeHealthCheckRequest) (*distribute.NodeHealthCheckResponse, error) { + resp := &distribute.NodeHealthCheckResponse{} -func (s *GRPCService) ReadinessProbe(ctx context.Context, req *empty.Empty) (*distribute.ReadinessProbeResponse, error) { - resp := &distribute.ReadinessProbeResponse{ - State: distribute.ReadinessProbeResponse_READY, + switch req.Probe { + case distribute.NodeHealthCheckRequest_HEALTHINESS: + resp.State = distribute.NodeHealthCheckResponse_HEALTHY + case distribute.NodeHealthCheckRequest_LIVENESS: + resp.State = distribute.NodeHealthCheckResponse_ALIVE + case distribute.NodeHealthCheckRequest_READINESS: + resp.State = distribute.NodeHealthCheckResponse_READY } return resp, nil diff --git a/dispatcher/server_test.go b/dispatcher/server_test.go index 2aae862..29d0577 100644 --- a/dispatcher/server_test.go +++ b/dispatcher/server_test.go @@ -22,11 +22,11 @@ import ( "testing" "time" - "github.com/hashicorp/raft" "github.com/mosuka/blast/config" "github.com/mosuka/blast/indexer" "github.com/mosuka/blast/logutils" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/index" "github.com/mosuka/blast/protobuf/management" "github.com/mosuka/blast/strutils" "github.com/mosuka/blast/testutils" @@ -205,69 +205,122 @@ func TestServer_Start(t *testing.T) { // // indexer cluster1 // - // create cluster config - indexerClusterConfig1 := config.DefaultClusterConfig() - indexerClusterConfig1.ManagerAddr = managerGrpcAddress1 - indexerClusterConfig1.ClusterId = "cluster1" - // create node config - indexerNodeConfig1 := testutils.TmpNodeConfig() + indexerManagerGrpcAddress1 := managerGrpcAddress1 + indexerShardId1 := "shard-1" + indexerPeerGrpcAddress1 := "" + indexerGrpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerHttpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerNodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerBindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerDataDir1 := testutils.TmpDir() defer func() { - _ = os.RemoveAll(indexerNodeConfig1.DataDir) + _ = os.RemoveAll(indexerDataDir1) }() - indexer1, err := indexer.NewServer(indexerClusterConfig1, indexerNodeConfig1, config.DefaultIndexConfig(), logger.Named("indexer1"), grpcLogger.Named("indexer1"), httpAccessLogger) + indexerRaftStorageType1 := "boltdb" + + indexerNode1 := &index.Node{ + Id: indexerNodeId1, + BindAddress: indexerBindAddress1, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress1, + HttpAddress: indexerHttpAddress1, + }, + } + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + indexerServer1, err := 
indexer.NewServer(indexerManagerGrpcAddress1, indexerShardId1, indexerPeerGrpcAddress1, indexerNode1, indexerDataDir1, indexerRaftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { - if indexer1 != nil { - indexer1.Stop() - } + indexerServer1.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server - indexer1.Start() + indexerServer1.Start() + // sleep time.Sleep(5 * time.Second) - // create node config - indexerNodeConfig2 := testutils.TmpNodeConfig() + indexerManagerGrpcAddress2 := managerGrpcAddress1 + indexerShardId2 := "shard-1" + indexerPeerGrpcAddress2 := "" + indexerGrpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerHttpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerNodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerBindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerDataDir2 := testutils.TmpDir() defer func() { - _ = os.RemoveAll(indexerNodeConfig2.DataDir) + _ = os.RemoveAll(indexerDataDir2) }() - indexer2, err := indexer.NewServer(indexerClusterConfig1, indexerNodeConfig2, config.DefaultIndexConfig(), logger.Named("indexer2"), grpcLogger.Named("indexer2"), httpAccessLogger) + indexerRaftStorageType2 := "boltdb" + + indexerNode2 := &index.Node{ + Id: indexerNodeId2, + BindAddress: indexerBindAddress2, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress2, + HttpAddress: indexerHttpAddress2, + }, + } + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + indexerServer2, err := indexer.NewServer(indexerManagerGrpcAddress2, indexerShardId2, indexerPeerGrpcAddress2, indexerNode2, indexerDataDir2, indexerRaftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { - if indexer2 != nil { - indexer2.Stop() - } + indexerServer2.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server - indexer2.Start() + indexerServer2.Start() + // sleep time.Sleep(5 * time.Second) - // create node config - indexerNodeConfig3 := testutils.TmpNodeConfig() + indexerManagerGrpcAddress3 := managerGrpcAddress1 + indexerShardId3 := "shard-1" + indexerPeerGrpcAddress3 := "" + indexerGrpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerHttpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerNodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerBindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerDataDir3 := testutils.TmpDir() defer func() { - _ = os.RemoveAll(indexerNodeConfig3.DataDir) + _ = os.RemoveAll(indexerDataDir3) }() - indexer3, err := indexer.NewServer(indexerClusterConfig1, indexerNodeConfig3, config.DefaultIndexConfig(), logger.Named("indexer3"), grpcLogger.Named("indexer3"), httpAccessLogger) + indexerRaftStorageType3 := "boltdb" + + indexerNode3 := &index.Node{ + Id: indexerNodeId3, + BindAddress: indexerBindAddress3, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress3, + HttpAddress: indexerHttpAddress3, + }, + } + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + indexerServer3, err := indexer.NewServer(indexerManagerGrpcAddress3, indexerShardId3, indexerPeerGrpcAddress3, indexerNode3, indexerDataDir3, indexerRaftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { - if 
indexer3 != nil { - indexer3.Stop() - } + indexerServer3.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server - indexer3.Start() + indexerServer3.Start() + // sleep time.Sleep(5 * time.Second) // gRPC client for manager1 - indexerClient1, err := indexer.NewGRPCClient(indexerNodeConfig1.GRPCAddr) + indexerClient1, err := indexer.NewGRPCClient(indexerNode1.Metadata.GrpcAddress) defer func() { _ = indexerClient1.Close() }() @@ -275,119 +328,165 @@ func TestServer_Start(t *testing.T) { t.Fatalf("%v", err) } // get cluster info from manager1 - indexerCluster1, err := indexerClient1.GetCluster() + indexerCluster1, err := indexerClient1.ClusterInfo() if err != nil { t.Fatalf("%v", err) } - expIndexerCluster1 := map[string]interface{}{ - indexerNodeConfig1.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - indexerNodeConfig2.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - indexerNodeConfig3.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig3.ToMap(), - "state": raft.Follower.String(), + expIndexerCluster1 := &index.Cluster{ + Nodes: map[string]*index.Node{ + indexerNodeId1: { + Id: indexerNodeId1, + BindAddress: indexerBindAddress1, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress1, + HttpAddress: indexerHttpAddress1, + }, + }, + indexerNodeId2: { + Id: indexerNodeId2, + BindAddress: indexerBindAddress2, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress2, + HttpAddress: indexerHttpAddress2, + }, + }, + indexerNodeId3: { + Id: indexerNodeId3, + BindAddress: indexerBindAddress3, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress3, + HttpAddress: indexerHttpAddress3, + }, + }, }, } actIndexerCluster1 := indexerCluster1 - expIndexerNodeConfig1 := expIndexerCluster1[indexerNodeConfig1.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig1 := actIndexerCluster1[indexerNodeConfig1.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig1, actIndexerNodeConfig1) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig1, actIndexerNodeConfig1) - } - actIndexerState1 := actIndexerCluster1[indexerNodeConfig1.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState1 && raft.Follower.String() != actIndexerState1 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState1) - } - expIndexerNodeConfig2 := expIndexerCluster1[indexerNodeConfig2.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig2 := actIndexerCluster1[indexerNodeConfig2.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig2, actIndexerNodeConfig2) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig2, actIndexerNodeConfig2) - } - actIndexerState2 := actIndexerCluster1[indexerNodeConfig2.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState2 && raft.Follower.String() != actIndexerState2 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState2) - } - expIndexerNodeConfig3 := 
expIndexerCluster1[indexerNodeConfig3.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig3 := actIndexerCluster1[indexerNodeConfig3.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig3, actIndexerNodeConfig3) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig3, actIndexerNodeConfig3) - } - actIndexerState3 := actIndexerCluster1[indexerNodeConfig3.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState3 && raft.Follower.String() != actIndexerState3 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState3) + if !reflect.DeepEqual(expIndexerCluster1, actIndexerCluster1) { + t.Fatalf("expected content to see %v, saw %v", expIndexerCluster1, actIndexerCluster1) } // // indexer cluster2 // - // create cluster config - indexerClusterConfig2 := config.DefaultClusterConfig() - indexerClusterConfig2.ManagerAddr = managerGrpcAddress1 - indexerClusterConfig2.ClusterId = "cluster2" - // create node config - indexerNodeConfig4 := testutils.TmpNodeConfig() + indexerManagerGrpcAddress4 := managerGrpcAddress1 + indexerShardId4 := "shard-2" + indexerPeerGrpcAddress4 := "" + indexerGrpcAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerHttpAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerNodeId4 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerBindAddress4 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerDataDir4 := testutils.TmpDir() defer func() { - _ = os.RemoveAll(indexerNodeConfig4.DataDir) + _ = os.RemoveAll(indexerDataDir4) }() - indexer4, err := indexer.NewServer(indexerClusterConfig2, indexerNodeConfig4, config.DefaultIndexConfig(), logger.Named("indexer4"), grpcLogger.Named("indexer4"), httpAccessLogger) + indexerRaftStorageType4 := "boltdb" + + indexerNode4 := &index.Node{ + Id: indexerNodeId4, + BindAddress: indexerBindAddress4, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress4, + HttpAddress: indexerHttpAddress4, + }, + } + indexConfig4, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + indexerServer4, err := indexer.NewServer(indexerManagerGrpcAddress4, indexerShardId4, indexerPeerGrpcAddress4, indexerNode4, indexerDataDir4, indexerRaftStorageType4, indexConfig4, logger, grpcLogger, httpAccessLogger) defer func() { - if indexer4 != nil { - indexer4.Stop() - } + indexerServer4.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server - indexer4.Start() + indexerServer4.Start() + // sleep time.Sleep(5 * time.Second) - // create node config - indexerNodeConfig5 := testutils.TmpNodeConfig() + indexerManagerGrpcAddress5 := managerGrpcAddress1 + indexerShardId5 := "shard-2" + indexerPeerGrpcAddress5 := "" + indexerGrpcAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerHttpAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerNodeId5 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerBindAddress5 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerDataDir5 := testutils.TmpDir() defer func() { - _ = os.RemoveAll(indexerNodeConfig5.DataDir) + _ = os.RemoveAll(indexerDataDir5) }() - indexer5, err := indexer.NewServer(indexerClusterConfig2, indexerNodeConfig5, config.DefaultIndexConfig(), logger.Named("indexer5"), grpcLogger.Named("indexer5"), 
httpAccessLogger) + indexerRaftStorageType5 := "boltdb" + + indexerNode5 := &index.Node{ + Id: indexerNodeId5, + BindAddress: indexerBindAddress5, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress5, + HttpAddress: indexerHttpAddress5, + }, + } + indexConfig5, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + indexerServer5, err := indexer.NewServer(indexerManagerGrpcAddress5, indexerShardId5, indexerPeerGrpcAddress5, indexerNode5, indexerDataDir5, indexerRaftStorageType5, indexConfig5, logger, grpcLogger, httpAccessLogger) defer func() { - if indexer5 != nil { - indexer5.Stop() - } + indexerServer5.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server - indexer5.Start() + indexerServer5.Start() + // sleep time.Sleep(5 * time.Second) - // create node config - indexerNodeConfig6 := testutils.TmpNodeConfig() + indexerManagerGrpcAddress6 := managerGrpcAddress1 + indexerShardId6 := "shard-2" + indexerPeerGrpcAddress6 := "" + indexerGrpcAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerHttpAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerNodeId6 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + indexerBindAddress6 := fmt.Sprintf(":%d", testutils.TmpPort()) + indexerDataDir6 := testutils.TmpDir() defer func() { - _ = os.RemoveAll(indexerNodeConfig6.DataDir) + _ = os.RemoveAll(indexerDataDir6) }() - indexer6, err := indexer.NewServer(indexerClusterConfig2, indexerNodeConfig6, config.DefaultIndexConfig(), logger.Named("indexer6"), grpcLogger.Named("indexer6"), httpAccessLogger) + indexerRaftStorageType6 := "boltdb" + + indexerNode6 := &index.Node{ + Id: indexerNodeId6, + BindAddress: indexerBindAddress6, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress6, + HttpAddress: indexerHttpAddress6, + }, + } + indexConfig6, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + indexerServer6, err := indexer.NewServer(indexerManagerGrpcAddress6, indexerShardId6, indexerPeerGrpcAddress6, indexerNode6, indexerDataDir6, indexerRaftStorageType6, indexConfig6, logger, grpcLogger, httpAccessLogger) defer func() { - if indexer6 != nil { - indexer6.Stop() - } + indexerServer6.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server - indexer6.Start() + indexerServer6.Start() + // sleep time.Sleep(5 * time.Second) // gRPC client for manager1 - indexerClient2, err := indexer.NewGRPCClient(indexerNodeConfig4.GRPCAddr) + indexerClient2, err := indexer.NewGRPCClient(indexerNode4.Metadata.GrpcAddress) defer func() { _ = indexerClient1.Close() }() @@ -395,51 +494,44 @@ func TestServer_Start(t *testing.T) { t.Fatalf("%v", err) } // get cluster info from manager1 - indexerCluster2, err := indexerClient2.GetCluster() + indexerCluster2, err := indexerClient2.ClusterInfo() if err != nil { t.Fatalf("%v", err) } - expIndexerCluster2 := map[string]interface{}{ - indexerNodeConfig4.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig4.ToMap(), - "state": raft.Leader.String(), - }, - indexerNodeConfig5.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig5.ToMap(), - "state": raft.Follower.String(), - }, - indexerNodeConfig6.NodeId: map[string]interface{}{ - "node_config": indexerNodeConfig6.ToMap(), - "state": raft.Follower.String(), + expIndexerCluster2 := 
&index.Cluster{ + Nodes: map[string]*index.Node{ + indexerNodeId4: { + Id: indexerNodeId4, + BindAddress: indexerBindAddress4, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress4, + HttpAddress: indexerHttpAddress4, + }, + }, + indexerNodeId5: { + Id: indexerNodeId5, + BindAddress: indexerBindAddress5, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress5, + HttpAddress: indexerHttpAddress5, + }, + }, + indexerNodeId6: { + Id: indexerNodeId6, + BindAddress: indexerBindAddress6, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: indexerGrpcAddress6, + HttpAddress: indexerHttpAddress6, + }, + }, }, } actIndexerCluster2 := indexerCluster2 - expIndexerNodeConfig4 := expIndexerCluster2[indexerNodeConfig4.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig4 := actIndexerCluster2[indexerNodeConfig4.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig4, actIndexerNodeConfig4) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig4, actIndexerNodeConfig4) - } - actIndexerState4 := actIndexerCluster2[indexerNodeConfig4.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState4 && raft.Follower.String() != actIndexerState4 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState4) - } - expIndexerNodeConfig5 := expIndexerCluster2[indexerNodeConfig5.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig5 := actIndexerCluster2[indexerNodeConfig5.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig5, actIndexerNodeConfig5) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig5, actIndexerNodeConfig5) - } - actIndexerState5 := actIndexerCluster2[indexerNodeConfig5.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState5 && raft.Follower.String() != actIndexerState5 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState5) - } - expIndexerNodeConfig6 := expIndexerCluster2[indexerNodeConfig6.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - actIndexerNodeConfig6 := actIndexerCluster2[indexerNodeConfig6.NodeId].(map[string]interface{})["node_config"].(map[string]interface{}) - if !reflect.DeepEqual(expIndexerNodeConfig6, actIndexerNodeConfig6) { - t.Fatalf("expected content to see %v, saw %v", expIndexerNodeConfig6, actIndexerNodeConfig6) - } - actIndexerState6 := actIndexerCluster2[indexerNodeConfig6.NodeId].(map[string]interface{})["state"].(string) - if raft.Leader.String() != actIndexerState6 && raft.Follower.String() != actIndexerState6 { - t.Fatalf("expected content to see %v or %v, saw %v", raft.Leader.String(), raft.Follower.String(), actIndexerState6) + if !reflect.DeepEqual(expIndexerCluster2, actIndexerCluster2) { + t.Fatalf("expected content to see %v, saw %v", expIndexerCluster2, actIndexerCluster2) } // diff --git a/indexer/grpc_client.go b/indexer/grpc_client.go index e5cdbf6..e955e3b 100644 --- a/indexer/grpc_client.go +++ b/indexer/grpc_client.go @@ -97,64 +97,47 @@ func (c *GRPCClient) GetAddress() string { return c.conn.Target() } -func (c *GRPCClient) LivenessProbe(opts ...grpc.CallOption) (string, error) { - resp, 
err := c.client.LivenessProbe(c.ctx, &empty.Empty{}) - if err != nil { - st, _ := status.FromError(err) +func (c *GRPCClient) NodeHealthCheck(probe string, opts ...grpc.CallOption) (string, error) { + req := &index.NodeHealthCheckRequest{} - return index.LivenessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + switch probe { + case index.NodeHealthCheckRequest_HEALTHINESS.String(): + req.Probe = index.NodeHealthCheckRequest_HEALTHINESS + case index.NodeHealthCheckRequest_LIVENESS.String(): + req.Probe = index.NodeHealthCheckRequest_LIVENESS + case index.NodeHealthCheckRequest_READINESS.String(): + req.Probe = index.NodeHealthCheckRequest_READINESS + default: + req.Probe = index.NodeHealthCheckRequest_HEALTHINESS } - return resp.State.String(), nil -} - -func (c *GRPCClient) ReadinessProbe(opts ...grpc.CallOption) (string, error) { - resp, err := c.client.ReadinessProbe(c.ctx, &empty.Empty{}) + resp, err := c.client.NodeHealthCheck(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) - return index.ReadinessProbeResponse_UNKNOWN.String(), errors.New(st.Message()) + return index.NodeHealthCheckResponse_UNHEALTHY.String(), errors.New(st.Message()) } return resp.State.String(), nil } -func (c *GRPCClient) GetNode(id string, opts ...grpc.CallOption) (map[string]interface{}, error) { - req := &index.GetNodeRequest{ - Id: id, - } - - resp, err := c.client.GetNode(c.ctx, req, opts...) +func (c *GRPCClient) NodeInfo(opts ...grpc.CallOption) (*index.Node, error) { + resp, err := c.client.NodeInfo(c.ctx, &empty.Empty{}, opts...) if err != nil { st, _ := status.FromError(err) return nil, errors.New(st.Message()) } - ins, err := protobuf.MarshalAny(resp.NodeConfig) - nodeConfig := *ins.(*map[string]interface{}) - - node := map[string]interface{}{ - "node_config": nodeConfig, - "state": resp.State, - } - - return node, nil + return resp.Node, nil } -func (c *GRPCClient) SetNode(id string, nodeConfig map[string]interface{}, opts ...grpc.CallOption) error { - nodeConfigAny := &any.Any{} - err := protobuf.UnmarshalAny(nodeConfig, nodeConfigAny) - if err != nil { - return err - } - - req := &index.SetNodeRequest{ - Id: id, - NodeConfig: nodeConfigAny, +func (c *GRPCClient) ClusterJoin(node *index.Node, opts ...grpc.CallOption) error { + req := &index.ClusterJoinRequest{ + Node: node, } - _, err = c.client.SetNode(c.ctx, req, opts...) + _, err := c.client.ClusterJoin(c.ctx, req, opts...) if err != nil { return err } @@ -162,12 +145,12 @@ func (c *GRPCClient) SetNode(id string, nodeConfig map[string]interface{}, opts return nil } -func (c *GRPCClient) DeleteNode(id string, opts ...grpc.CallOption) error { - req := &index.DeleteNodeRequest{ +func (c *GRPCClient) ClusterLeave(id string, opts ...grpc.CallOption) error { + req := &index.ClusterLeaveRequest{ Id: id, } - _, err := c.client.DeleteNode(c.ctx, req, opts...) + _, err := c.client.ClusterLeave(c.ctx, req, opts...) if err != nil { return err } @@ -175,24 +158,21 @@ func (c *GRPCClient) DeleteNode(id string, opts ...grpc.CallOption) error { return nil } -func (c *GRPCClient) GetCluster(opts ...grpc.CallOption) (map[string]interface{}, error) { - resp, err := c.client.GetCluster(c.ctx, &empty.Empty{}, opts...) +func (c *GRPCClient) ClusterInfo(opts ...grpc.CallOption) (*index.Cluster, error) { + resp, err := c.client.ClusterInfo(c.ctx, &empty.Empty{}, opts...) 
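// A minimal usage sketch of the renamed indexer client methods above, assuming an
// indexer node serving gRPC on the example address ":5001"; error handling is kept
// short. This is an illustrative aside, not part of the patch.
package main

import (
	"fmt"
	"log"

	"github.com/mosuka/blast/indexer"
	"github.com/mosuka/blast/protobuf/index"
)

func main() {
	client, err := indexer.NewGRPCClient(":5001")
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		_ = client.Close()
	}()

	// health check by probe name (HEALTHINESS, LIVENESS or READINESS)
	state, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(state)

	// ClusterInfo now returns a typed *index.Cluster instead of a generic map
	cluster, err := client.ClusterInfo()
	if err != nil {
		log.Fatal(err)
	}
	for id, node := range cluster.Nodes {
		fmt.Println(id, node.State, node.Metadata.GrpcAddress)
	}
}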
if err != nil { st, _ := status.FromError(err) return nil, errors.New(st.Message()) } - ins, err := protobuf.MarshalAny(resp.Cluster) - cluster := *ins.(*map[string]interface{}) - - return cluster, nil + return resp.Cluster, nil } -func (c *GRPCClient) WatchCluster(opts ...grpc.CallOption) (index.Index_WatchClusterClient, error) { +func (c *GRPCClient) ClusterWatch(opts ...grpc.CallOption) (index.Index_ClusterWatchClient, error) { req := &empty.Empty{} - watchClient, err := c.client.WatchCluster(c.ctx, req, opts...) + watchClient, err := c.client.ClusterWatch(c.ctx, req, opts...) if err != nil { st, _ := status.FromError(err) return nil, errors.New(st.Message()) diff --git a/indexer/grpc_service.go b/indexer/grpc_service.go index 5b8d5cb..18007ff 100644 --- a/indexer/grpc_service.go +++ b/indexer/grpc_service.go @@ -16,18 +16,18 @@ package indexer import ( "context" + "encoding/json" "errors" "fmt" "io" - "reflect" "sync" "time" "github.com/blevesearch/bleve" "github.com/golang/protobuf/ptypes/any" "github.com/golang/protobuf/ptypes/empty" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/raft" - "github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/manager" @@ -40,16 +40,17 @@ import ( ) type GRPCService struct { - clusterConfig *config.ClusterConfig - raftServer *RaftServer - logger *zap.Logger + managerGrpcAddress string + shardId string + raftServer *RaftServer + logger *zap.Logger updateClusterStopCh chan struct{} updateClusterDoneCh chan struct{} - peers map[string]interface{} + peers *index.Cluster peerClients map[string]*GRPCClient - cluster map[string]interface{} - clusterChans map[chan index.GetClusterResponse]struct{} + cluster *index.Cluster + clusterChans map[chan index.ClusterWatchResponse]struct{} clusterMutex sync.RWMutex managers *management.Cluster @@ -58,16 +59,17 @@ type GRPCService struct { updateManagersDoneCh chan struct{} } -func NewGRPCService(clusterConfig *config.ClusterConfig, raftServer *RaftServer, logger *zap.Logger) (*GRPCService, error) { +func NewGRPCService(managerGrpcAddress string, shardId string, raftServer *RaftServer, logger *zap.Logger) (*GRPCService, error) { return &GRPCService{ - clusterConfig: clusterConfig, - raftServer: raftServer, - logger: logger, + managerGrpcAddress: managerGrpcAddress, + shardId: shardId, + raftServer: raftServer, + logger: logger, - peers: make(map[string]interface{}, 0), + peers: &index.Cluster{Nodes: make(map[string]*index.Node, 0)}, peerClients: make(map[string]*GRPCClient, 0), - cluster: make(map[string]interface{}, 0), - clusterChans: make(map[chan index.GetClusterResponse]struct{}), + cluster: &index.Cluster{Nodes: make(map[string]*index.Node, 0)}, + clusterChans: make(map[chan index.ClusterWatchResponse]struct{}), managers: &management.Cluster{Nodes: make(map[string]*management.Node, 0)}, managerClients: make(map[string]*manager.GRPCClient, 0), @@ -75,14 +77,29 @@ func NewGRPCService(clusterConfig *config.ClusterConfig, raftServer *RaftServer, } func (s *GRPCService) Start() error { - s.logger.Info("start to update cluster info") - go s.startUpdateCluster(500 * time.Millisecond) + if s.managerGrpcAddress != "" { + var err error + s.managers, err = s.getManagerCluster(s.managerGrpcAddress) + if err != nil { + s.logger.Fatal(err.Error()) + return err + } + + for id, node := range s.managers.Nodes { + client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) + if err != nil { + s.logger.Fatal(err.Error(), zap.String("id", 
id), zap.String("grpc_address", s.managerGrpcAddress)) + } + s.managerClients[node.Id] = client + } - if s.clusterConfig.ManagerAddr != "" { s.logger.Info("start to update manager cluster info") go s.startUpdateManagers(500 * time.Millisecond) } + s.logger.Info("start to update cluster info") + go s.startUpdateCluster(500 * time.Millisecond) + return nil } @@ -90,7 +107,7 @@ func (s *GRPCService) Stop() error { s.logger.Info("stop to update cluster info") s.stopUpdateCluster() - if s.clusterConfig.ManagerAddr != "" { + if s.managerGrpcAddress != "" { s.logger.Info("stop to update manager cluster info") s.stopUpdateManagers() } @@ -126,7 +143,7 @@ func (s *GRPCService) getManagerClient() (*manager.GRPCClient, error) { return nil, err } -func (s *GRPCService) getInitialManagers(managerAddr string) (*management.Cluster, error) { +func (s *GRPCService) getManagerCluster(managerAddr string) (*management.Cluster, error) { client, err := manager.NewGRPCClient(managerAddr) defer func() { err := client.Close() @@ -149,6 +166,21 @@ func (s *GRPCService) getInitialManagers(managerAddr string) (*management.Cluste return managers, nil } +func (s *GRPCService) cloneManagerCluster(cluster *management.Cluster) (*management.Cluster, error) { + b, err := json.Marshal(cluster) + if err != nil { + return nil, err + } + + var clone *management.Cluster + err = json.Unmarshal(b, &clone) + if err != nil { + return nil, err + } + + return clone, nil +} + func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.updateManagersStopCh = make(chan struct{}) s.updateManagersDoneCh = make(chan struct{}) @@ -157,50 +189,20 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { close(s.updateManagersDoneCh) }() - var err error - - // get initial managers - s.managers, err = s.getInitialManagers(s.clusterConfig.ManagerAddr) - if err != nil { - s.logger.Error(err.Error()) - return - } - s.logger.Debug("initialize manager list", zap.Any("managers", s.managers)) - - // create clients for managers - for nodeId, node := range s.managers.Nodes { - if node.Metadata == nil { - s.logger.Warn("missing metadata", zap.String("id", nodeId)) - continue - } - - if node.Metadata.GrpcAddress == "" { - s.logger.Warn("missing gRPC address", zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - continue - } - - s.logger.Debug("create gRPC client", zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - client, err := manager.NewGRPCClient(node.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("id", nodeId), zap.String("grpc_addr", node.Metadata.GrpcAddress)) - } - if client != nil { - s.managerClients[nodeId] = client - } - } - for { select { case <-s.updateManagersStopCh: s.logger.Info("received a request to stop updating a manager cluster") return default: + // get client for manager from the list client, err := s.getManagerClient() if err != nil { s.logger.Error(err.Error()) continue } + // create stream for watching cluster changes stream, err := client.ClusterWatch() if err != nil { s.logger.Error(err.Error()) @@ -217,80 +219,77 @@ func (s *GRPCService) startUpdateManagers(checkInterval time.Duration) { s.logger.Error(err.Error()) continue } - managers := resp.Cluster - - if !reflect.DeepEqual(s.managers, managers) { - // open clients - for nodeId, nodeConfig := range managers.Nodes { - if nodeConfig.Metadata == nil { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId)) - continue + s.logger.Info("cluster 
has changed", zap.Any("resp", resp)) + switch resp.Event { + case management.ClusterWatchResponse_JOIN, management.ClusterWatchResponse_UPDATE: + // add to cluster nodes + s.managers.Nodes[resp.Node.Id] = resp.Node + + // check node state + switch resp.Node.State { + case management.Node_UNKNOWN, management.Node_SHUTDOWN: + // close client + if client, exist := s.managerClients[resp.Node.Id]; exist { + s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) + } + delete(s.managerClients, resp.Node.Id) } - - if nodeConfig.Metadata.GrpcAddress == "" { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) + default: // management.Node_FOLLOWER, management.Node_CANDIDATE, management.Node_LEADER + if resp.Node.Metadata.GrpcAddress == "" { + s.logger.Warn("missing gRPC address", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) continue } - client, exist := s.managerClients[nodeId] - if exist { - s.logger.Debug("client has already exist in manager list", zap.String("id", nodeId)) - - if client.GetAddress() != nodeConfig.Metadata.GrpcAddress { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) - - delete(s.managerClients, nodeId) - + // check client that already exist in the client list + if client, exist := s.managerClients[resp.Node.Id]; !exist { + // create new client + s.logger.Info("create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + continue + } + s.managerClients[resp.Node.Id] = newClient + } else { + if client.GetAddress() != resp.Node.Metadata.GrpcAddress { + // close client + s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) err = client.Close() if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId)) + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id)) } + delete(s.managerClients, resp.Node.Id) - newClient, err := manager.NewGRPCClient(nodeConfig.Metadata.GrpcAddress) + // re-create new client + s.logger.Info("re-create gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + newClient, err := manager.NewGRPCClient(resp.Node.Metadata.GrpcAddress) if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", resp.Node.Metadata.GrpcAddress)) + continue } - - if newClient != nil { - s.managerClients[nodeId] = newClient - } - } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) - } - } else { - s.logger.Debug("client does not exist in peer list", 
zap.String("node_id", nodeId)) - - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) - newClient, err := manager.NewGRPCClient(nodeConfig.Metadata.GrpcAddress) - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", nodeConfig.Metadata.GrpcAddress)) - } - if newClient != nil { - s.managerClients[nodeId] = newClient + s.managerClients[resp.Node.Id] = newClient } } } - - // close nonexistent clients - for nodeId, client := range s.managerClients { - if nodeConfig, exist := managers.Nodes[nodeId]; !exist { - s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - - s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("address", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.String("address", client.GetAddress())) - } - - s.logger.Debug("delete client", zap.String("node_id", nodeId)) - delete(s.managerClients, nodeId) + case management.ClusterWatchResponse_LEAVE: + if client, exist := s.managerClients[resp.Node.Id]; exist { + s.logger.Info("close gRPC client", zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", resp.Node.Id), zap.String("grpc_addr", client.GetAddress())) } + delete(s.managerClients, resp.Node.Id) } - // keep current manager cluster - s.managers = managers - s.logger.Debug("managers", zap.Any("managers", s.managers)) + if _, exist := s.managers.Nodes[resp.Node.Id]; exist { + delete(s.managers.Nodes, resp.Node.Id) + } + default: + s.logger.Debug("unknown event", zap.Any("event", resp.Event)) + continue } } } @@ -317,34 +316,33 @@ func (s *GRPCService) stopUpdateManagers() { } func (s *GRPCService) getLeaderClient() (*GRPCClient, error) { - var client *GRPCClient - - for id, node := range s.cluster { - state, ok := node.(map[string]interface{})["state"].(string) - if !ok { - s.logger.Warn("missing state", zap.String("id", id), zap.String("state", state)) - continue - } - - if state == raft.Leader.String() { - client, ok = s.peerClients[id] - if ok { - break - } else { - s.logger.Error("node does not exist", zap.String("id", id)) + for id, node := range s.cluster.Nodes { + switch node.State { + case index.Node_LEADER: + if client, exist := s.peerClients[id]; exist { + return client, nil } - } else { - s.logger.Debug("not a leader", zap.String("id", id)) } } - if client == nil { - err := errors.New("there is no leader") - s.logger.Error(err.Error()) + err := errors.New("there is no leader") + s.logger.Error(err.Error()) + return nil, err +} + +func (s *GRPCService) cloneCluster(cluster *index.Cluster) (*index.Cluster, error) { + b, err := json.Marshal(cluster) + if err != nil { return nil, err } - return client, nil + var clone *index.Cluster + err = json.Unmarshal(b, &clone) + if err != nil { + return nil, err + } + + return clone, nil } func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { @@ -358,138 +356,168 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { ticker := time.NewTicker(checkInterval) defer ticker.Stop() + savedCluster, err := s.cloneCluster(s.cluster) + if err != nil { + s.logger.Error(err.Error()) + return + } + for { select { case <-s.updateClusterStopCh: s.logger.Info("received a request to stop updating a cluster") return case 
<-ticker.C: - cluster, err := s.getCluster() + s.cluster, err = s.getCluster() + if err != nil { + s.logger.Error(err.Error()) + return + } + + snapshotCluster, err := s.cloneCluster(s.cluster) if err != nil { s.logger.Error(err.Error()) return } // create peer node list with out self node - peers := make(map[string]interface{}, 0) - for nodeId, node := range cluster { - if nodeId != s.NodeID() { - peers[nodeId] = node + for id, node := range snapshotCluster.Nodes { + if id != s.NodeID() { + s.peers.Nodes[id] = node } } - if !reflect.DeepEqual(s.peers, peers) { - // open clients - for nodeId, nodeInfo := range peers { - nodeConfig, ok := nodeInfo.(map[string]interface{})["node_config"].(map[string]interface{}) - if !ok { - s.logger.Warn("assertion failed", zap.String("node_id", nodeId), zap.Any("node_info", nodeInfo)) - continue - } - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Warn("missing metadata", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - continue - } - - client, exist := s.peerClients[nodeId] - if exist { - s.logger.Debug("client has already exist in peer list", zap.String("node_id", nodeId)) - - if client.GetAddress() != grpcAddr { - s.logger.Debug("gRPC address has been changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - s.logger.Debug("recreate gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - - delete(s.peerClients, nodeId) - - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId)) - } - - newClient, err := NewGRPCClient(grpcAddr) - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - } - - if newClient != nil { - s.peerClients[nodeId] = newClient - } - } else { - s.logger.Debug("gRPC address has not changed", zap.String("node_id", nodeId), zap.String("client_grpc_addr", client.GetAddress()), zap.String("grpc_addr", grpcAddr)) - } - } else { - s.logger.Debug("client does not exist in peer list", zap.String("node_id", nodeId)) + // open clients for peer nodes + for id, node := range s.peers.Nodes { + if node.Metadata.GrpcAddress == "" { + s.logger.Debug("missing gRPC address", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue + } - s.logger.Debug("create gRPC client", zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) - peerClient, err := NewGRPCClient(grpcAddr) + client, exist := s.peerClients[id] + if exist { + if client.GetAddress() != node.Metadata.GrpcAddress { + s.logger.Info("recreate gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + delete(s.peerClients, id) + err = client.Close() if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", grpcAddr)) + s.logger.Warn(err.Error(), zap.String("id", id)) } - if peerClient != nil { - s.logger.Debug("append peer client to peer client list", zap.String("grpc_addr", peerClient.GetAddress())) - s.peerClients[nodeId] = peerClient + newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) + if err != nil { + s.logger.Error(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue } + s.peerClients[id] = newClient + } + } else { + s.logger.Info("create gRPC client", zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + newClient, err := NewGRPCClient(node.Metadata.GrpcAddress) + if 
err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", node.Metadata.GrpcAddress)) + continue } + s.peerClients[id] = newClient } + } - // close nonexistent clients - for nodeId, client := range s.peerClients { - if nodeConfig, exist := peers[nodeId]; !exist { - s.logger.Info("this client is no longer in use", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) + // close clients for non-existent peer nodes + for id, client := range s.peerClients { + if _, exist := s.peers.Nodes[id]; !exist { + s.logger.Info("close gRPC client", zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) + err = client.Close() + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.String("grpc_addr", client.GetAddress())) + } + delete(s.peerClients, id) + } + } - s.logger.Debug("close client", zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) - err = client.Close() - if err != nil { - s.logger.Warn(err.Error(), zap.String("node_id", nodeId), zap.String("grpc_addr", client.GetAddress())) + // check joined and updated nodes + for id, node := range snapshotCluster.Nodes { + nodeSnapshot, exist := savedCluster.Nodes[id] + if exist { + // node exists in the cluster + n1, err := json.Marshal(node) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", node)) + continue + } + n2, err := json.Marshal(nodeSnapshot) + if err != nil { + s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", nodeSnapshot)) + continue + } + if !cmp.Equal(n1, n2) { + // node updated + // notify the cluster changes + clusterResp := &index.ClusterWatchResponse{ + Event: index.ClusterWatchResponse_UPDATE, + Node: node, + Cluster: snapshotCluster, } - - s.logger.Debug("delete client", zap.String("node_id", nodeId)) - delete(s.peerClients, nodeId) + for c := range s.clusterChans { + c <- *clusterResp + } + } + } else { + // node joined + // notify the cluster changes + clusterResp := &index.ClusterWatchResponse{ + Event: index.ClusterWatchResponse_JOIN, + Node: node, + Cluster: snapshotCluster, + } + for c := range s.clusterChans { + c <- *clusterResp } } + } - // keep current peer nodes - s.logger.Debug("current peers", zap.Any("peers", peers)) - s.peers = peers - } else { - s.logger.Debug("there is no change in peers", zap.Any("peers", peers)) + // check left nodes + for id, node := range savedCluster.Nodes { + if _, exist := snapshotCluster.Nodes[id]; !exist { + // node left + // notify the cluster changes + clusterResp := &index.ClusterWatchResponse{ + Event: index.ClusterWatchResponse_LEAVE, + Node: node, + Cluster: snapshotCluster, + } + for c := range s.clusterChans { + c <- *clusterResp + } + } } - // notify current cluster - if !reflect.DeepEqual(s.cluster, cluster) { - // convert to GetClusterResponse for channel output - clusterResp := &index.GetClusterResponse{} - clusterAny := &any.Any{} - err = protobuf.UnmarshalAny(cluster, clusterAny) + // set cluster state to manager + if !cmp.Equal(savedCluster, snapshotCluster) && s.managerGrpcAddress != "" && s.raftServer.IsLeader() { + snapshotClusterBytes, err := json.Marshal(snapshotCluster) if err != nil { - s.logger.Warn(err.Error()) + s.logger.Error(err.Error()) + continue } - clusterResp.Cluster = clusterAny - - // output to channel - for c := range s.clusterChans { - c <- *clusterResp + var snapshotClusterMap map[string]interface{} + err = json.Unmarshal(snapshotClusterBytes, &snapshotClusterMap) + if err != nil { + 
s.logger.Error(err.Error()) + continue } - // notify cluster config to manager - if s.clusterConfig.ManagerAddr != "" && s.raftServer.IsLeader() { - client, err := s.getManagerClient() - if err != nil { - s.logger.Error(err.Error()) - } - err = client.Set(fmt.Sprintf("cluster_config/clusters/%s/nodes", s.clusterConfig.ClusterId), cluster) - if err != nil { - s.logger.Error(err.Error()) - } + client, err := s.getManagerClient() + if err != nil { + s.logger.Error(err.Error()) + continue + } + s.logger.Info("update shards", zap.Any("shards", snapshotClusterMap)) + err = client.Set(fmt.Sprintf("cluster/shards/%s", s.shardId), snapshotClusterMap) + if err != nil { + s.logger.Error(err.Error()) + continue } - - // keep current cluster - s.logger.Debug("current cluster", zap.Any("cluster", cluster)) - s.cluster = cluster - } else { - s.logger.Debug("there is no change in cluster", zap.Any("cluster", cluster)) } + + savedCluster = snapshotCluster default: time.Sleep(100 * time.Millisecond) } @@ -516,17 +544,16 @@ func (s *GRPCService) stopUpdateCluster() { s.logger.Info("the cluster update has been stopped") } -func (s *GRPCService) LivenessProbe(ctx context.Context, req *empty.Empty) (*index.LivenessProbeResponse, error) { - resp := &index.LivenessProbeResponse{ - State: index.LivenessProbeResponse_ALIVE, - } +func (s *GRPCService) NodeHealthCheck(ctx context.Context, req *index.NodeHealthCheckRequest) (*index.NodeHealthCheckResponse, error) { + resp := &index.NodeHealthCheckResponse{} - return resp, nil -} - -func (s *GRPCService) ReadinessProbe(ctx context.Context, req *empty.Empty) (*index.ReadinessProbeResponse, error) { - resp := &index.ReadinessProbeResponse{ - State: index.ReadinessProbeResponse_READY, + switch req.Probe { + case index.NodeHealthCheckRequest_HEALTHINESS: + resp.State = index.NodeHealthCheckResponse_HEALTHY + case index.NodeHealthCheckRequest_LIVENESS: + resp.State = index.NodeHealthCheckResponse_ALIVE + case index.NodeHealthCheckRequest_READINESS: + resp.State = index.NodeHealthCheckResponse_READY } return resp, nil @@ -536,90 +563,73 @@ func (s *GRPCService) NodeID() string { return s.raftServer.NodeID() } -func (s *GRPCService) getSelfNode() (map[string]interface{}, error) { - return map[string]interface{}{ - "node_config": s.raftServer.nodeConfig.ToMap(), - "state": s.raftServer.State().String(), - }, nil -} - -func (s *GRPCService) getPeerNode(id string) (map[string]interface{}, error) { - var nodeInfo map[string]interface{} - var err error +func (s *GRPCService) getSelfNode() *index.Node { + node := s.raftServer.node - if peerClient, exist := s.peerClients[id]; exist { - nodeInfo, err = peerClient.GetNode(id) - if err != nil { - s.logger.Warn(err.Error()) - nodeInfo = map[string]interface{}{ - "node_config": map[string]interface{}{}, - "state": raft.Shutdown.String(), - } - } - } else { - s.logger.Warn("node does not exist in peer list", zap.String("id", id)) - nodeInfo = map[string]interface{}{ - "node_config": map[string]interface{}{}, - "state": raft.Shutdown.String(), - } + switch s.raftServer.State() { + case raft.Follower: + node.State = index.Node_FOLLOWER + case raft.Candidate: + node.State = index.Node_CANDIDATE + case raft.Leader: + node.State = index.Node_LEADER + case raft.Shutdown: + node.State = index.Node_SHUTDOWN + default: + node.State = index.Node_UNKNOWN } - return nodeInfo, nil + return node } -func (s *GRPCService) getNode(id string) (map[string]interface{}, error) { - var nodeInfo map[string]interface{} - var err error - - if id == "" || id == 
s.NodeID() { - nodeInfo, err = s.getSelfNode() - } else { - nodeInfo, err = s.getPeerNode(id) +func (s *GRPCService) getPeerNode(id string) (*index.Node, error) { + if _, exist := s.peerClients[id]; !exist { + err := errors.New("node does not exist in peers") + s.logger.Debug(err.Error(), zap.String("id", id)) + return nil, err } + node, err := s.peerClients[id].NodeInfo() if err != nil { - s.logger.Error(err.Error()) - return nil, err + s.logger.Debug(err.Error(), zap.String("id", id)) + return &index.Node{ + BindAddress: "", + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: "", + HttpAddress: "", + }, + }, nil } - return nodeInfo, nil + return node, nil } -func (s *GRPCService) GetNode(ctx context.Context, req *index.GetNodeRequest) (*index.GetNodeResponse, error) { - resp := &index.GetNodeResponse{} +func (s *GRPCService) getNode(id string) (*index.Node, error) { + if id == "" || id == s.NodeID() { + return s.getSelfNode(), nil + } else { + return s.getPeerNode(id) + } +} + +func (s *GRPCService) NodeInfo(ctx context.Context, req *empty.Empty) (*index.NodeInfoResponse, error) { + resp := &index.NodeInfoResponse{} - nodeInfo, err := s.getNode(req.Id) + node, err := s.getNode(s.NodeID()) if err != nil { s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) } - nodeConfigAny := &any.Any{} - if nodeConfig, exist := nodeInfo["node_config"]; exist { - err = protobuf.UnmarshalAny(nodeConfig.(map[string]interface{}), nodeConfigAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - } else { - s.logger.Error("missing node_config", zap.Any("node_config", nodeConfig)) - } - - state, exist := nodeInfo["state"].(string) - if !exist { - s.logger.Error("missing node state", zap.String("state", state)) - state = raft.Shutdown.String() - } - - resp.NodeConfig = nodeConfigAny - resp.State = state - - return resp, nil + return &index.NodeInfoResponse{ + Node: node, + }, nil } -func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) error { +func (s *GRPCService) setNode(node *index.Node) error { if s.raftServer.IsLeader() { - err := s.raftServer.SetNode(id, nodeConfig) + err := s.raftServer.SetNode(node) if err != nil { s.logger.Error(err.Error()) return err @@ -631,7 +641,7 @@ func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) erro s.logger.Error(err.Error()) return err } - err = client.SetNode(id, nodeConfig) + err = client.ClusterJoin(node) if err != nil { s.logger.Error(err.Error()) return err @@ -641,18 +651,10 @@ func (s *GRPCService) setNode(id string, nodeConfig map[string]interface{}) erro return nil } -func (s *GRPCService) SetNode(ctx context.Context, req *index.SetNodeRequest) (*empty.Empty, error) { +func (s *GRPCService) ClusterJoin(ctx context.Context, req *index.ClusterJoinRequest) (*empty.Empty, error) { resp := &empty.Empty{} - ins, err := protobuf.MarshalAny(req.NodeConfig) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - nodeConfig := *ins.(*map[string]interface{}) - - err = s.setNode(req.Id, nodeConfig) + err := s.setNode(req.Node) if err != nil { s.logger.Error(err.Error()) return resp, status.Error(codes.Internal, err.Error()) @@ -675,7 +677,7 @@ func (s *GRPCService) deleteNode(id string) error { s.logger.Error(err.Error()) return err } - err = client.DeleteNode(id) + err = client.ClusterLeave(id) if err != nil { s.logger.Error(err.Error()) return err @@ -685,7 
+687,7 @@ func (s *GRPCService) deleteNode(id string) error { return nil } -func (s *GRPCService) DeleteNode(ctx context.Context, req *index.DeleteNodeRequest) (*empty.Empty, error) { +func (s *GRPCService) ClusterLeave(ctx context.Context, req *index.ClusterLeaveRequest) (*empty.Empty, error) { resp := &empty.Empty{} err := s.deleteNode(req.Id) @@ -697,33 +699,28 @@ func (s *GRPCService) DeleteNode(ctx context.Context, req *index.DeleteNodeReque return resp, nil } -func (s *GRPCService) getCluster() (map[string]interface{}, error) { +func (s *GRPCService) getCluster() (*index.Cluster, error) { cluster, err := s.raftServer.GetCluster() if err != nil { s.logger.Error(err.Error()) return nil, err } - // update node state - for nodeId := range cluster { - node, err := s.getNode(nodeId) + // update latest node state + for id := range cluster.Nodes { + node, err := s.getNode(id) if err != nil { - s.logger.Error(err.Error()) - } - state := node["state"].(string) - - if _, ok := cluster[nodeId]; !ok { - cluster[nodeId] = map[string]interface{}{} + s.logger.Debug(err.Error()) + continue } - nodeInfo := cluster[nodeId].(map[string]interface{}) - nodeInfo["state"] = state + cluster.Nodes[id] = node } return cluster, nil } -func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*index.GetClusterResponse, error) { - resp := &index.GetClusterResponse{} +func (s *GRPCService) ClusterInfo(ctx context.Context, req *empty.Empty) (*index.ClusterInfoResponse, error) { + resp := &index.ClusterInfoResponse{} cluster, err := s.getCluster() if err != nil { @@ -731,20 +728,13 @@ func (s *GRPCService) GetCluster(ctx context.Context, req *empty.Empty) (*index. return resp, status.Error(codes.Internal, err.Error()) } - clusterAny := &any.Any{} - err = protobuf.UnmarshalAny(cluster, clusterAny) - if err != nil { - s.logger.Error(err.Error()) - return resp, status.Error(codes.Internal, err.Error()) - } - - resp.Cluster = clusterAny + resp.Cluster = cluster return resp, nil } -func (s *GRPCService) WatchCluster(req *empty.Empty, server index.Index_WatchClusterServer) error { - chans := make(chan index.GetClusterResponse) +func (s *GRPCService) ClusterWatch(req *empty.Empty, server index.Index_ClusterWatchServer) error { + chans := make(chan index.ClusterWatchResponse) s.clusterMutex.Lock() s.clusterChans[chans] = struct{}{} diff --git a/indexer/index.go b/indexer/index.go index f208a17..986c09b 100644 --- a/indexer/index.go +++ b/indexer/index.go @@ -19,14 +19,13 @@ import ( "os" "time" - "github.com/mosuka/blast/protobuf/index" - "github.com/blevesearch/bleve" "github.com/blevesearch/bleve/document" "github.com/golang/protobuf/ptypes/any" "github.com/mosuka/blast/config" "github.com/mosuka/blast/errors" "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" ) diff --git a/indexer/raft_fsm.go b/indexer/raft_fsm.go index 5be21e4..3d6bfc9 100644 --- a/indexer/raft_fsm.go +++ b/indexer/raft_fsm.go @@ -21,15 +21,13 @@ import ( "io/ioutil" "sync" - "github.com/mosuka/blast/protobuf/index" - "github.com/blevesearch/bleve" "github.com/golang/protobuf/proto" "github.com/hashicorp/raft" "github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" - "github.com/mosuka/blast/maputils" "github.com/mosuka/blast/protobuf" + "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" ) @@ -38,8 +36,8 @@ type RaftFSM struct { indexConfig *config.IndexConfig logger *zap.Logger - metadata maputils.Map - metadataMutex sync.RWMutex + cluster *index.Cluster + 
clusterMutex sync.RWMutex index *Index } @@ -53,10 +51,11 @@ func NewRaftFSM(path string, indexConfig *config.IndexConfig, logger *zap.Logger } func (f *RaftFSM) Start() error { - var err error - - f.metadata = maputils.Map{} + f.logger.Info("initialize cluster") + f.cluster = &index.Cluster{Nodes: make(map[string]*index.Node, 0)} + f.logger.Info("initialize index") + var err error f.index, err = NewIndex(f.path, f.indexConfig, f.logger) if err != nil { f.logger.Error(err.Error()) @@ -67,6 +66,7 @@ func (f *RaftFSM) Start() error { } func (f *RaftFSM) Stop() error { + f.logger.Info("close index") err := f.index.Close() if err != nil { f.logger.Error(err.Error()) @@ -76,45 +76,37 @@ func (f *RaftFSM) Stop() error { return nil } -func (f *RaftFSM) GetNodeConfig(nodeId string) (map[string]interface{}, error) { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() +func (f *RaftFSM) GetNode(nodeId string) (*index.Node, error) { + f.clusterMutex.RLock() + defer f.clusterMutex.RUnlock() - nodeConfig, err := f.metadata.Get(nodeId) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId)) - if err == maputils.ErrNotFound { - return nil, blasterrors.ErrNotFound - } - return nil, err + node, ok := f.cluster.Nodes[nodeId] + if !ok { + return nil, blasterrors.ErrNotFound } - return nodeConfig.(maputils.Map).ToMap(), nil + return node, nil } -func (f *RaftFSM) SetNodeConfig(nodeId string, nodeConfig map[string]interface{}) error { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() +func (f *RaftFSM) SetNode(node *index.Node) error { + f.clusterMutex.RLock() + defer f.clusterMutex.RUnlock() - err := f.metadata.Merge(nodeId, nodeConfig) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - return err - } + f.cluster.Nodes[node.Id] = node return nil } -func (f *RaftFSM) DeleteNodeConfig(nodeId string) error { - f.metadataMutex.RLock() - defer f.metadataMutex.RUnlock() +func (f *RaftFSM) DeleteNode(nodeId string) error { + f.clusterMutex.RLock() + defer f.clusterMutex.RUnlock() - err := f.metadata.Delete(nodeId) - if err != nil { - f.logger.Error(err.Error(), zap.String("node_id", nodeId)) - return err + if _, ok := f.cluster.Nodes[nodeId]; !ok { + return blasterrors.ErrNotFound } + delete(f.cluster.Nodes, nodeId) + return nil } @@ -215,7 +207,22 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.SetNodeConfig(data["node_id"].(string), data["node_config"].(map[string]interface{})) + b, err := json.Marshal(data["node"]) + if err != nil { + f.logger.Error(err.Error()) + return &fsmResponse{error: err} + } + var node *index.Node + err = json.Unmarshal(b, &node) + if err != nil { + f.logger.Error(err.Error()) + return &fsmResponse{error: err} + } + err = f.SetNode(node) + if err != nil { + f.logger.Error(err.Error()) + return &fsmResponse{error: err} + } return &fsmResponse{error: err} case deleteNode: var data map[string]interface{} @@ -224,7 +231,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.DeleteNodeConfig(data["node_id"].(string)) + err = f.DeleteNode(data["id"].(string)) return &fsmResponse{error: err} case indexDocument: var data []map[string]interface{} diff --git a/indexer/raft_server.go b/indexer/raft_server.go index 5d8d1c8..c2fa628 100644 --- a/indexer/raft_server.go +++ b/indexer/raft_server.go @@ -31,33 +31,38 @@ import ( 
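// A small standalone sketch of the JSON round-trip the FSM above uses to recover a
// typed *index.Node from the generic message payload; the node values are made-up
// examples. This is an illustrative aside, not part of the patch.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/mosuka/blast/protobuf/index"
)

func main() {
	// payload as it arrives in Apply(): a generic map carrying the node
	data := map[string]interface{}{
		"node": &index.Node{
			Id:          "node1",
			BindAddress: ":2001",
			Metadata:    &index.Metadata{GrpcAddress: ":5001", HttpAddress: ":8001"},
		},
	}

	b, err := json.Marshal(data["node"])
	if err != nil {
		log.Fatal(err)
	}

	var node *index.Node
	if err := json.Unmarshal(b, &node); err != nil {
		log.Fatal(err)
	}
	fmt.Println(node.Id, node.BindAddress, node.Metadata.GrpcAddress)
}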
"github.com/mosuka/blast/config" blasterrors "github.com/mosuka/blast/errors" "github.com/mosuka/blast/indexutils" + "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" //raftmdb "github.com/hashicorp/raft-mdb" ) type RaftServer struct { - nodeConfig *config.NodeConfig - indexConfig *config.IndexConfig - bootstrap bool - logger *zap.Logger + node *index.Node + dataDir string + raftStorageType string + indexConfig *config.IndexConfig + bootstrap bool + logger *zap.Logger raft *raft.Raft fsm *RaftFSM } -func NewRaftServer(nodeConfig *config.NodeConfig, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { +func NewRaftServer(node *index.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { return &RaftServer{ - nodeConfig: nodeConfig, - indexConfig: indexConfig, - bootstrap: bootstrap, - logger: logger, + node: node, + dataDir: dataDir, + raftStorageType: raftStorageType, + indexConfig: indexConfig, + bootstrap: bootstrap, + logger: logger, }, nil } func (s *RaftServer) Start() error { var err error - fsmPath := filepath.Join(s.nodeConfig.DataDir, "index") + fsmPath := filepath.Join(s.dataDir, "index") s.logger.Info("create finite state machine", zap.String("path", fsmPath)) s.fsm, err = NewRaftFSM(fsmPath, s.indexConfig, s.logger) if err != nil { @@ -72,27 +77,30 @@ func (s *RaftServer) Start() error { return err } - s.logger.Info("create Raft config", zap.String("node_id", s.nodeConfig.NodeId)) + s.logger.Info("create Raft config", zap.String("id", s.node.Id)) raftConfig := raft.DefaultConfig() - raftConfig.LocalID = raft.ServerID(s.nodeConfig.NodeId) + raftConfig.LocalID = raft.ServerID(s.node.Id) raftConfig.SnapshotThreshold = 1024 raftConfig.LogOutput = ioutil.Discard + //if s.bootstrap { + // raftConfig.StartAsLeader = true + //} - s.logger.Info("resolve TCP address", zap.String("bind_addr", s.nodeConfig.BindAddr)) - addr, err := net.ResolveTCPAddr("tcp", s.nodeConfig.BindAddr) + s.logger.Info("resolve TCP address", zap.String("bind_addr", s.node.BindAddress)) + addr, err := net.ResolveTCPAddr("tcp", s.node.BindAddress) if err != nil { s.logger.Fatal(err.Error()) return err } - s.logger.Info("create TCP transport", zap.String("bind_addr", s.nodeConfig.BindAddr)) - transport, err := raft.NewTCPTransport(s.nodeConfig.BindAddr, addr, 3, 10*time.Second, ioutil.Discard) + s.logger.Info("create TCP transport", zap.String("bind_addr", s.node.BindAddress)) + transport, err := raft.NewTCPTransport(s.node.BindAddress, addr, 3, 10*time.Second, ioutil.Discard) if err != nil { s.logger.Fatal(err.Error()) return err } - snapshotPath := s.nodeConfig.DataDir + snapshotPath := s.dataDir s.logger.Info("create snapshot store", zap.String("path", snapshotPath)) snapshotStore, err := raft.NewFileSnapshotStore(snapshotPath, 2, ioutil.Discard) if err != nil { @@ -103,10 +111,10 @@ func (s *RaftServer) Start() error { s.logger.Info("create Raft machine") var logStore raft.LogStore var stableStore raft.StableStore - switch s.nodeConfig.RaftStorageType { + switch s.raftStorageType { case "boltdb": - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") + s.logger.Info("create raft log store", zap.String("path", logStorePath), 
zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Dir(logStorePath), 0755) if err != nil { s.logger.Fatal(err.Error()) @@ -117,8 +125,8 @@ func (s *RaftServer) Start() error { s.logger.Fatal(err.Error()) return err } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") + s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) stableStore, err = raftboltdb.NewBoltStore(stableStorePath) if err != nil { @@ -126,8 +134,8 @@ func (s *RaftServer) Start() error { return err } case "badger": - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + logStorePath := filepath.Join(s.dataDir, "raft", "log") + s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Join(logStorePath, "badger"), 0755) if err != nil { s.logger.Fatal(err.Error()) @@ -138,8 +146,8 @@ func (s *RaftServer) Start() error { s.logger.Fatal(err.Error()) return err } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + stableStorePath := filepath.Join(s.dataDir, "raft", "stable") + s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Join(stableStorePath, "badger"), 0755) if err != nil { s.logger.Fatal(err.Error()) @@ -151,8 +159,8 @@ func (s *RaftServer) Start() error { return err } default: - logStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "log", "boltdb.db") - s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + logStorePath := filepath.Join(s.dataDir, "raft", "log", "boltdb.db") + s.logger.Info("create raft log store", zap.String("path", logStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Dir(logStorePath), 0755) if err != nil { s.logger.Fatal(err.Error()) @@ -163,8 +171,8 @@ func (s *RaftServer) Start() error { s.logger.Fatal(err.Error()) return err } - stableStorePath := filepath.Join(s.nodeConfig.DataDir, "raft", "stable", "boltdb.db") - s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.nodeConfig.RaftStorageType)) + stableStorePath := filepath.Join(s.dataDir, "raft", "stable", "boltdb.db") + s.logger.Info("create raft stable store", zap.String("path", stableStorePath), zap.String("raft_storage_type", s.raftStorageType)) err = os.MkdirAll(filepath.Dir(stableStorePath), 0755) stableStore, err = raftboltdb.NewBoltStore(stableStorePath) if err != nil { @@ -200,11 +208,11 @@ func (s *RaftServer) Start() error { } // set node config - s.logger.Info("register its own information", zap.String("node_id", s.nodeConfig.NodeId), zap.Any("node_config", s.nodeConfig)) - err = s.setNodeConfig(s.nodeConfig.NodeId, 
s.nodeConfig.ToMap()) + s.logger.Info("register its own node config", zap.Any("node", s.node)) + err = s.setNode(s.node) if err != nil { s.logger.Fatal(err.Error()) - return nil + return err } } @@ -230,17 +238,6 @@ func (s *RaftServer) Stop() error { return nil } -func (s *RaftServer) raftServers() ([]raft.Server, error) { - cf := s.raft.GetConfiguration() - err := cf.Error() - if err != nil { - s.logger.Error(err.Error()) - return nil, err - } - - return cf.Configuration().Servers, nil -} - func (s *RaftServer) LeaderAddress(timeout time.Duration) (raft.ServerAddress, error) { ticker := time.NewTicker(100 * time.Millisecond) defer ticker.Stop() @@ -270,13 +267,14 @@ func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { return "", err } - servers, err := s.raftServers() + cf := s.raft.GetConfiguration() + err = cf.Error() if err != nil { s.logger.Error(err.Error()) return "", err } - for _, server := range servers { + for _, server := range cf.Configuration().Servers { if server.Address == leaderAddr { return server.ID, nil } @@ -287,7 +285,7 @@ func (s *RaftServer) LeaderID(timeout time.Duration) (raft.ServerID, error) { } func (s *RaftServer) NodeID() string { - return s.nodeConfig.NodeId + return s.node.Id } func (s *RaftServer) Stats() map[string]string { @@ -312,99 +310,98 @@ func (s *RaftServer) WaitForDetectLeader(timeout time.Duration) error { return nil } -func (s *RaftServer) getNodeConfig(nodeId string) (map[string]interface{}, error) { - nodeConfig, err := s.fsm.GetNodeConfig(nodeId) +func (s *RaftServer) getNode(nodeId string) (*index.Node, error) { + nodeConfig, err := s.fsm.GetNode(nodeId) if err != nil { - s.logger.Error(err.Error()) + s.logger.Debug(err.Error(), zap.String("id", nodeId)) return nil, err } return nodeConfig, nil } -func (s *RaftServer) setNodeConfig(nodeId string, nodeConfig map[string]interface{}) error { +func (s *RaftServer) setNode(node *index.Node) error { msg, err := newMessage( setNode, map[string]interface{}{ - "node_id": nodeId, - "node_config": nodeConfig, + "node": node, }, ) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } msgBytes, err := json.Marshal(msg) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } err = f.Response().(*fsmResponse).error if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } return nil } -func (s *RaftServer) deleteNodeConfig(nodeId string) error { +func (s *RaftServer) deleteNode(nodeId string) error { msg, err := newMessage( deleteNode, map[string]interface{}{ - "node_id": nodeId, + "id": nodeId, }, ) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } msgBytes, err := json.Marshal(msg) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } f := s.raft.Apply(msgBytes, 10*time.Second) err = f.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } err = f.Response().(*fsmResponse).error if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } return nil } -func (s *RaftServer) GetNode(id string) 
(map[string]interface{}, error) { - servers, err := s.raftServers() +func (s *RaftServer) GetNode(id string) (*index.Node, error) { + cf := s.raft.GetConfiguration() + err := cf.Error() if err != nil { s.logger.Error(err.Error()) return nil, err } - node := make(map[string]interface{}, 0) - for _, server := range servers { + var node *index.Node + for _, server := range cf.Configuration().Servers { if server.ID == raft.ServerID(id) { - nodeConfig, err := s.getNodeConfig(id) + node, err = s.getNode(id) if err != nil { - s.logger.Error(err.Error()) + s.logger.Debug(err.Error(), zap.String("id", id)) return nil, err } - node["node_config"] = nodeConfig break } } @@ -412,44 +409,45 @@ func (s *RaftServer) GetNode(id string) (map[string]interface{}, error) { return node, nil } -func (s *RaftServer) SetNode(nodeId string, nodeConfig map[string]interface{}) error { +func (s *RaftServer) SetNode(node *index.Node) error { if !s.IsLeader() { s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return raft.ErrNotLeader } - servers, err := s.raftServers() + cf := s.raft.GetConfiguration() + err := cf.Error() if err != nil { s.logger.Error(err.Error()) return err } - for _, server := range servers { - if server.ID == raft.ServerID(nodeId) { - s.logger.Info("node already joined the cluster", zap.String("id", nodeId)) + for _, server := range cf.Configuration().Servers { + if server.ID == raft.ServerID(node.Id) { + s.logger.Info("node already joined the cluster", zap.Any("id", node.Id)) return nil } } - bindAddr, ok := nodeConfig["bind_addr"].(string) - if !ok { - s.logger.Error("missing metadata", zap.String("bind_addr", bindAddr)) - return errors.New("missing metadata") + if node.BindAddress == "" { + err = errors.New("missing bind address") + s.logger.Error(err.Error(), zap.String("bind_addr", node.BindAddress)) + return err } // add node to Raft cluster - s.logger.Info("add voter", zap.String("nodeId", nodeId), zap.String("address", bindAddr)) - f := s.raft.AddVoter(raft.ServerID(nodeId), raft.ServerAddress(bindAddr), 0, 0) + s.logger.Info("join the node to the raft cluster", zap.String("id", node.Id), zap.Any("bind_address", node.BindAddress)) + f := s.raft.AddVoter(raft.ServerID(node.Id), raft.ServerAddress(node.BindAddress), 0, 0) err = f.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", node.Id), zap.String("bind_address", node.BindAddress)) return err } // set node config - err = s.setNodeConfig(nodeId, nodeConfig) + err = s.setNode(node) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.Any("node", node)) return err } @@ -458,54 +456,57 @@ func (s *RaftServer) SetNode(nodeId string, nodeConfig map[string]interface{}) e func (s *RaftServer) DeleteNode(nodeId string) error { if !s.IsLeader() { - s.logger.Warn(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) + s.logger.Error(raft.ErrNotLeader.Error(), zap.String("state", s.raft.State().String())) return raft.ErrNotLeader } - servers, err := s.raftServers() + cf := s.raft.GetConfiguration() + err := cf.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } // delete node from Raft cluster - for _, server := range servers { + for _, server := range cf.Configuration().Servers { if server.ID == raft.ServerID(nodeId) { - s.logger.Debug("remove server", zap.String("node_id", nodeId)) + s.logger.Info("remove the node from the raft cluster", 
zap.String("id", nodeId)) f := s.raft.RemoveServer(server.ID, 0, 0) err = f.Error() if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", string(server.ID))) return err } } } // delete node config - err = s.deleteNodeConfig(nodeId) + err = s.deleteNode(nodeId) if err != nil { - s.logger.Error(err.Error()) + s.logger.Error(err.Error(), zap.String("id", nodeId)) return err } return nil } -func (s *RaftServer) GetCluster() (map[string]interface{}, error) { - servers, err := s.raftServers() +func (s *RaftServer) GetCluster() (*index.Cluster, error) { + cf := s.raft.GetConfiguration() + err := cf.Error() if err != nil { s.logger.Error(err.Error()) return nil, err } - cluster := map[string]interface{}{} - for _, server := range servers { + cluster := &index.Cluster{Nodes: make(map[string]*index.Node, 0)} + for _, server := range cf.Configuration().Servers { node, err := s.GetNode(string(server.ID)) if err != nil { - s.logger.Warn(err.Error()) - node = map[string]interface{}{} + s.logger.Debug(err.Error(), zap.String("id", string(server.ID))) + continue } - cluster[string(server.ID)] = node + + cluster.Nodes[string(server.ID)] = node } return cluster, nil diff --git a/indexer/server.go b/indexer/server.go index 8813a48..1ffa188 100644 --- a/indexer/server.go +++ b/indexer/server.go @@ -15,22 +15,28 @@ package indexer import ( + "encoding/json" "fmt" accesslog "github.com/mash/go-accesslog" "github.com/mosuka/blast/config" "github.com/mosuka/blast/errors" "github.com/mosuka/blast/manager" + "github.com/mosuka/blast/protobuf/index" "go.uber.org/zap" ) type Server struct { - clusterConfig *config.ClusterConfig - nodeConfig *config.NodeConfig - indexConfig *config.IndexConfig - logger *zap.Logger - grpcLogger *zap.Logger - httpLogger accesslog.Logger + managerGrpcAddress string + shardId string + peerGrpcAddress string + node *index.Node + dataDir string + raftStorageType string + indexConfig *config.IndexConfig + logger *zap.Logger + grpcLogger *zap.Logger + httpLogger accesslog.Logger raftServer *RaftServer grpcService *GRPCService @@ -39,23 +45,27 @@ type Server struct { httpServer *HTTPServer } -func NewServer(clusterConfig *config.ClusterConfig, nodeConfig *config.NodeConfig, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { +func NewServer(managerGrpcAddress string, shardId string, peerGrpcAddress string, node *index.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, logger *zap.Logger, grpcLogger *zap.Logger, httpLogger accesslog.Logger) (*Server, error) { return &Server{ - clusterConfig: clusterConfig, - nodeConfig: nodeConfig, - indexConfig: indexConfig, - logger: logger, - grpcLogger: grpcLogger, - httpLogger: httpLogger, + managerGrpcAddress: managerGrpcAddress, + shardId: shardId, + peerGrpcAddress: peerGrpcAddress, + node: node, + dataDir: dataDir, + raftStorageType: raftStorageType, + indexConfig: indexConfig, + logger: logger, + grpcLogger: grpcLogger, + httpLogger: httpLogger, }, nil } func (s *Server) Start() { // get peer from manager - if s.clusterConfig.ManagerAddr != "" { - s.logger.Info("connect to manager", zap.String("manager_addr", s.clusterConfig.ManagerAddr)) + if s.managerGrpcAddress != "" { + s.logger.Info("connect to manager", zap.String("manager_grpc_addr", s.managerGrpcAddress)) - mc, err := manager.NewGRPCClient(s.clusterConfig.ManagerAddr) + mc, err := manager.NewGRPCClient(s.managerGrpcAddress) defer func() { 
s.logger.Debug("close client", zap.String("address", mc.GetAddress())) err = mc.Close() @@ -69,45 +79,41 @@ func (s *Server) Start() { return } - clusterIntr, err := mc.Get(fmt.Sprintf("cluster_config/clusters/%s/nodes", s.clusterConfig.ClusterId)) + clusterIntr, err := mc.Get(fmt.Sprintf("cluster/shards/%s", s.shardId)) if err != nil && err != errors.ErrNotFound { s.logger.Fatal(err.Error()) return } if clusterIntr != nil { - cluster := *clusterIntr.(*map[string]interface{}) - for nodeId, nodeInfoIntr := range cluster { - if nodeId == s.nodeConfig.NodeId { - s.logger.Debug("skip own node id", zap.String("node_id", nodeId)) - continue - } - - nodeInfo := nodeInfoIntr.(map[string]interface{}) + b, err := json.Marshal(clusterIntr) + if err != nil { + s.logger.Fatal(err.Error()) + return + } - // get the peer node config - nodeConfig, ok := nodeInfo["node_config"].(map[string]interface{}) - if !ok { - s.logger.Error("missing node config", zap.String("node_id", nodeId), zap.Any("node_config", nodeConfig)) - continue - } + var cluster *index.Cluster + err = json.Unmarshal(b, &cluster) + if err != nil { + s.logger.Fatal(err.Error()) + return + } - // get the peer node gRPC address - grpcAddr, ok := nodeConfig["grpc_addr"].(string) - if !ok { - s.logger.Error("missing gRPC address", zap.String("id", nodeId), zap.String("grpc_addr", grpcAddr)) + for id, node := range cluster.Nodes { + if id == s.node.Id { + s.logger.Debug("skip own node id", zap.String("id", id)) continue } - s.logger.Info("peer node detected", zap.String("peer_addr", grpcAddr)) - s.clusterConfig.PeerAddr = grpcAddr + s.logger.Info("peer node detected", zap.String("peer_grpc_addr", node.Metadata.GrpcAddress)) + s.peerGrpcAddress = node.Metadata.GrpcAddress break } } } //get index config from manager or peer - if s.clusterConfig.ManagerAddr != "" { - mc, err := manager.NewGRPCClient(s.clusterConfig.ManagerAddr) + if s.managerGrpcAddress != "" { + mc, err := manager.NewGRPCClient(s.managerGrpcAddress) defer func() { s.logger.Debug("close client", zap.String("address", mc.GetAddress())) err = mc.Close() @@ -131,8 +137,8 @@ func (s *Server) Start() { if value != nil { s.indexConfig = config.NewIndexConfigFromMap(*value.(*map[string]interface{})) } - } else if s.clusterConfig.PeerAddr != "" { - pc, err := NewGRPCClient(s.clusterConfig.PeerAddr) + } else if s.peerGrpcAddress != "" { + pc, err := NewGRPCClient(s.peerGrpcAddress) defer func() { s.logger.Debug("close client", zap.String("address", pc.GetAddress())) err = pc.Close() @@ -159,41 +165,41 @@ func (s *Server) Start() { } // bootstrap node? 
- bootstrap := s.clusterConfig.PeerAddr == "" + bootstrap := s.peerGrpcAddress == "" s.logger.Info("bootstrap", zap.Bool("bootstrap", bootstrap)) var err error // create raft server - s.raftServer, err = NewRaftServer(s.nodeConfig, s.indexConfig, bootstrap, s.logger) + s.raftServer, err = NewRaftServer(s.node, s.dataDir, s.raftStorageType, s.indexConfig, bootstrap, s.logger) if err != nil { s.logger.Fatal(err.Error()) return } // create gRPC service - s.grpcService, err = NewGRPCService(s.clusterConfig, s.raftServer, s.logger) + s.grpcService, err = NewGRPCService(s.managerGrpcAddress, s.shardId, s.raftServer, s.logger) if err != nil { s.logger.Fatal(err.Error()) return } // create gRPC server - s.grpcServer, err = NewGRPCServer(s.nodeConfig.GRPCAddr, s.grpcService, s.grpcLogger) + s.grpcServer, err = NewGRPCServer(s.node.Metadata.GrpcAddress, s.grpcService, s.grpcLogger) if err != nil { s.logger.Fatal(err.Error()) return } // create HTTP router - s.httpRouter, err = NewRouter(s.nodeConfig.GRPCAddr, s.logger) + s.httpRouter, err = NewRouter(s.node.Metadata.GrpcAddress, s.logger) if err != nil { s.logger.Fatal(err.Error()) return } // create HTTP server - s.httpServer, err = NewHTTPServer(s.nodeConfig.HTTPAddr, s.httpRouter, s.logger, s.httpLogger) + s.httpServer, err = NewHTTPServer(s.node.Metadata.HttpAddress, s.httpRouter, s.logger, s.httpLogger) if err != nil { s.logger.Fatal(err.Error()) return @@ -235,7 +241,7 @@ func (s *Server) Start() { // join to the existing cluster if !bootstrap { - client, err := NewGRPCClient(s.clusterConfig.PeerAddr) + client, err := NewGRPCClient(s.peerGrpcAddress) defer func() { err := client.Close() if err != nil { @@ -247,7 +253,7 @@ func (s *Server) Start() { return } - err = client.SetNode(s.nodeConfig.NodeId, s.nodeConfig.ToMap()) + err = client.ClusterJoin(s.node) if err != nil { s.logger.Fatal(err.Error()) return diff --git a/indexer/server_test.go b/indexer/server_test.go index 58071bc..b527382 100644 --- a/indexer/server_test.go +++ b/indexer/server_test.go @@ -16,6 +16,7 @@ package indexer import ( "encoding/json" + "fmt" "io/ioutil" "os" "path/filepath" @@ -23,9 +24,9 @@ import ( "testing" "time" + "github.com/mosuka/blast/strutils" + "github.com/blevesearch/bleve" - "github.com/hashicorp/raft" - "github.com/mosuka/blast/config" "github.com/mosuka/blast/errors" "github.com/mosuka/blast/indexutils" "github.com/mosuka/blast/logutils" @@ -36,31 +37,39 @@ import ( func TestServer_Start(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + 
HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -78,31 +87,39 @@ func TestServer_Start(t *testing.T) { func TestServer_LivenessProbe(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -117,7 +134,7 @@ func TestServer_LivenessProbe(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -130,79 +147,34 @@ func TestServer_LivenessProbe(t *testing.T) { t.Fatalf("%v", err) } - // liveness - liveness, err := client.LivenessProbe() + // healthiness + healthiness, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { t.Fatalf("%v", err) } - expLiveness := index.LivenessProbeResponse_ALIVE.String() - actLiveness := liveness - if expLiveness != actLiveness { - t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) - } -} - -func TestServer_ReadinessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = 
os.RemoveAll(nodeConfig.DataDir) - }() - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") - if err != nil { - t.Fatalf("%v", err) + expHealthiness := index.NodeHealthCheckResponse_HEALTHY.String() + actHealthiness := healthiness + if expHealthiness != actHealthiness { + t.Fatalf("expected content to see %v, saw %v", expHealthiness, actHealthiness) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) - defer func() { - server.Stop() - }() + // liveness + liveness, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { t.Fatalf("%v", err) } - - // start server - server.Start() - - // sleep - time.Sleep(5 * time.Second) - - // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) - defer func() { - if client != nil { - err = client.Close() - if err != nil { - t.Fatalf("%v", err) - } - } - }() - if err != nil { - t.Fatalf("%v", err) + expLiveness := index.NodeHealthCheckResponse_ALIVE.String() + actLiveness := liveness + if expLiveness != actLiveness { + t.Fatalf("expected content to see %v, saw %v", expLiveness, actLiveness) } // readiness - readiness, err := client.ReadinessProbe() + readiness, err := client.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) if err != nil { t.Fatalf("%v", err) } - expReadiness := index.ReadinessProbeResponse_READY.String() + expReadiness := index.NodeHealthCheckResponse_READY.String() actReadiness := readiness if expReadiness != actReadiness { t.Fatalf("expected content to see %v, saw %v", expReadiness, actReadiness) @@ -212,31 +184,39 @@ func TestServer_ReadinessProbe(t *testing.T) { func TestServer_GetNode(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -251,7 +231,7 @@ func TestServer_GetNode(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := 
NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -265,48 +245,61 @@ func TestServer_GetNode(t *testing.T) { } // get node - node, err := client.GetNode(nodeConfig.NodeId) + nodeInfo, err := client.NodeInfo() if err != nil { t.Fatalf("%v", err) } - expNode := map[string]interface{}{ - "node_config": nodeConfig.ToMap(), - "state": "Leader", + expNodeInfo := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, } - actNode := node - if !reflect.DeepEqual(expNode, actNode) { - t.Fatalf("expected content to see %v, saw %v", expNode, actNode) + actNodeInfo := nodeInfo + if !reflect.DeepEqual(expNodeInfo, actNodeInfo) { + t.Fatalf("expected content to see %v, saw %v", expNodeInfo, actNodeInfo) } } func TestServer_GetCluster(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -321,7 +314,7 @@ func TestServer_GetCluster(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -335,14 +328,21 @@ func TestServer_GetCluster(t *testing.T) { } // get cluster - cluster, err := client.GetCluster() + cluster, err := client.ClusterInfo() if err != nil { t.Fatalf("%v", err) } - expCluster := map[string]interface{}{ - nodeConfig.NodeId: map[string]interface{}{ - "node_config": nodeConfig.ToMap(), - "state": "Leader", + expCluster := &index.Cluster{ + Nodes: map[string]*index.Node{ + nodeId: { + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + }, }, } actCluster := cluster @@ -354,31 +354,39 @@ func TestServer_GetCluster(t *testing.T) { func TestServer_GetIndexMapping(t *testing.T) 
{ curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -393,7 +401,7 @@ func TestServer_GetIndexMapping(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -429,31 +437,39 @@ func TestServer_GetIndexMapping(t *testing.T) { func TestServer_GetIndexType(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) 
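Every test in this file makes the same switch from the old config-struct constructor to the flattened `NewServer` signature. A condensed sketch of the new call shape is shown here; the addresses and paths are placeholders, and `indexConfig`, `logger`, `grpcLogger`, and `httpAccessLogger` are assumed to be built with the same helpers the surrounding tests use.

```go
// Placeholder values; see the surrounding tests for how the loggers and
// indexConfig are actually constructed.
node := &index.Node{
	Id:          "indexer1",
	BindAddress: ":2000",
	State:       index.Node_UNKNOWN,
	Metadata:    &index.Metadata{GrpcAddress: ":5000", HttpAddress: ":8000"},
}

server, err := NewServer(
	"",   // managerGrpcAddress: empty means no manager is consulted
	"",   // shardId: used when looking up the shard via a manager
	"",   // peerGrpcAddress: empty means bootstrap a new Raft cluster
	node, // node identity and addresses as a protobuf index.Node
	"/tmp/blast/indexer1", // dataDir
	"boltdb",              // raftStorageType
	indexConfig, logger, grpcLogger, httpAccessLogger,
)
```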
defer func() { server.Stop() }() @@ -468,7 +484,7 @@ func TestServer_GetIndexType(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -501,31 +517,39 @@ func TestServer_GetIndexType(t *testing.T) { func TestServer_GetIndexStorageType(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -540,7 +564,7 @@ func TestServer_GetIndexStorageType(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -573,31 +597,39 @@ func TestServer_GetIndexStorageType(t *testing.T) { func TestServer_GetIndexStats(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index 
config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -612,7 +644,7 @@ func TestServer_GetIndexStats(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -654,31 +686,39 @@ func TestServer_GetIndexStats(t *testing.T) { func TestServer_PutDocument(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -693,7 +733,7 @@ func TestServer_PutDocument(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -747,31 +787,39 @@ func TestServer_PutDocument(t *testing.T) { func TestServer_GetDocument(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := 
fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -786,7 +834,7 @@ func TestServer_GetDocument(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -855,31 +903,39 @@ func TestServer_GetDocument(t *testing.T) { func TestServer_DeleteDocument(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -894,7 +950,7 @@ func TestServer_DeleteDocument(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -992,31 +1048,39 @@ func TestServer_DeleteDocument(t *testing.T) { func TestServer_Search(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 
500, 3, 30, false) - // create cluster config - clusterConfig := config.DefaultClusterConfig() - - // create node config - nodeConfig := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig.DataDir) - }() + managerGrpcAddress := "" + shardId := "" + peerGrpcAddress := "" + grpcAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() + raftStorageType := "boltdb" + + node := &index.Node{ + Id: nodeId, + BindAddress: bindAddress, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress, + HttpAddress: httpAddress, + }, + } - // create index config indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - server, err := NewServer(clusterConfig, nodeConfig, indexConfig, logger, grpcLogger, httpAccessLogger) + server, err := NewServer(managerGrpcAddress, shardId, peerGrpcAddress, node, dataDir, raftStorageType, indexConfig, logger, grpcLogger, httpAccessLogger) defer func() { server.Stop() }() @@ -1031,7 +1095,7 @@ func TestServer_Search(t *testing.T) { time.Sleep(5 * time.Second) // create gRPC client - client, err := NewGRPCClient(nodeConfig.GRPCAddr) + client, err := NewGRPCClient(node.Metadata.GrpcAddress) defer func() { if client != nil { err = client.Close() @@ -1117,78 +1181,125 @@ func TestServer_Search(t *testing.T) { func TestCluster_Start(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + managerGrpcAddress1 := "" + shardId1 := "" + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() + raftStorageType1 := "boltdb" + + node1 := &index.Node{ + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) + server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { - if server1 != nil { - server1.Stop() - } + server1.Stop() }() if err != nil { t.Fatalf("%v", err) } - // 
start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) + managerGrpcAddress2 := "" + shardId2 := "" + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() + raftStorageType2 := "boltdb" + + node2 := &index.Node{ + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { - if server2 != nil { - server2.Stop() - } + server2.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) + managerGrpcAddress3 := "" + shardId3 := "" + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() + raftStorageType3 := "boltdb" + + node3 := &index.Node{ + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { - if server3 != nil { - server3.Stop() - } + server3.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep @@ -1198,99 +1309,146 @@ func TestCluster_Start(t *testing.T) { func TestCluster_LivenessProbe(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // 
create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + managerGrpcAddress1 := "" + shardId1 := "" + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() + raftStorageType1 := "boltdb" + + node1 := &index.Node{ + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() + server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } + server1.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) + managerGrpcAddress2 := "" + shardId2 := "" + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() + raftStorageType2 := "boltdb" + + node2 := &index.Node{ + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { - if server2 != nil { - server2.Stop() - } + server2.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) + 
managerGrpcAddress3 := "" + shardId3 := "" + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() + raftStorageType3 := "boltdb" + + node3 := &index.Node{ + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { - if server3 != nil { - server3.Stop() - } + server3.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep time.Sleep(5 * time.Second) // gRPC client for all servers - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) defer func() { _ = client3.Close() }() @@ -1298,171 +1456,100 @@ func TestCluster_LivenessProbe(t *testing.T) { t.Fatalf("%v", err) } - // liveness check for server1 - liveness1, err := client1.LivenessProbe() + // healthiness + healthiness1, err := client1.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { t.Fatalf("%v", err) } - expLiveness1 := index.LivenessProbeResponse_ALIVE.String() - actLiveness1 := liveness1 - if expLiveness1 != actLiveness1 { - t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) + expHealthiness1 := index.NodeHealthCheckResponse_HEALTHY.String() + actHealthiness1 := healthiness1 + if expHealthiness1 != actHealthiness1 { + t.Fatalf("expected content to see %v, saw %v", expHealthiness1, actHealthiness1) } - // liveness check for server2 - liveness2, err := client2.LivenessProbe() + // liveness + liveness1, err := client1.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { t.Fatalf("%v", err) } - expLiveness2 := index.LivenessProbeResponse_ALIVE.String() - actLiveness2 := liveness2 - if expLiveness2 != actLiveness2 { - t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) + expLiveness1 := index.NodeHealthCheckResponse_ALIVE.String() + actLiveness1 := liveness1 + if expLiveness1 != actLiveness1 { + t.Fatalf("expected content to see %v, saw %v", expLiveness1, actLiveness1) } - // liveness check for server3 - liveness3, err := client3.LivenessProbe() + // readiness + readiness1, err := client1.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) if err != nil { t.Fatalf("%v", err) } - expLiveness3 := index.LivenessProbeResponse_ALIVE.String() - actLiveness3 := liveness3 - if expLiveness3 != actLiveness3 { - t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) + 
expReadiness1 := index.NodeHealthCheckResponse_READY.String() + actReadiness1 := readiness1 + if expReadiness1 != actReadiness1 { + t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) } -} -func TestCluster_ReadinessProbe(t *testing.T) { - curDir, _ := os.Getwd() - - // create logger - logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger - grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger - httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + // healthiness + healthiness2, err := client2.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { t.Fatalf("%v", err) } - - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) + expHealthiness2 := index.NodeHealthCheckResponse_HEALTHY.String() + actHealthiness2 := healthiness2 + if expHealthiness2 != actHealthiness2 { + t.Fatalf("expected content to see %v, saw %v", expHealthiness2, actHealthiness2) } - // start server1 - server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) - defer func() { - if server2 != nil { - server2.Stop() - } - }() + // liveness + liveness2, err := client2.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { t.Fatalf("%v", err) } - // start server2 - server2.Start() - - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) - defer func() { - if server3 != nil { - server3.Stop() - } - }() - if err != nil { - t.Fatalf("%v", err) + expLiveness2 := index.NodeHealthCheckResponse_ALIVE.String() + actLiveness2 := liveness2 + if expLiveness2 != actLiveness2 { + t.Fatalf("expected content to see %v, saw %v", expLiveness2, actLiveness2) } - // start server3 - server3.Start() - - // sleep - time.Sleep(5 * time.Second) - // gRPC client for all servers - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) - defer func() { - _ = client1.Close() - }() - if err != nil { - t.Fatalf("%v", err) - } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) - defer func() { - _ = client2.Close() - }() + // readiness + readiness2, err := client2.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) - defer func() { - _ = client3.Close() - }() - 
if err != nil { - t.Fatalf("%v", err) + expReadiness2 := index.NodeHealthCheckResponse_READY.String() + actReadiness2 := readiness2 + if expReadiness2 != actReadiness2 { + t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) } - // readiness check for server1 - readiness1, err := client1.ReadinessProbe() + // healthiness + healthiness3, err := client3.NodeHealthCheck(index.NodeHealthCheckRequest_HEALTHINESS.String()) if err != nil { t.Fatalf("%v", err) } - expReadiness1 := index.ReadinessProbeResponse_READY.String() - actReadiness1 := readiness1 - if expReadiness1 != actReadiness1 { - t.Fatalf("expected content to see %v, saw %v", expReadiness1, actReadiness1) + expHealthiness3 := index.NodeHealthCheckResponse_HEALTHY.String() + actHealthiness3 := healthiness3 + if expHealthiness3 != actHealthiness3 { + t.Fatalf("expected content to see %v, saw %v", expHealthiness3, actHealthiness3) } - // readiness check for server2 - readiness2, err := client2.ReadinessProbe() + // liveness + liveness3, err := client3.NodeHealthCheck(index.NodeHealthCheckRequest_LIVENESS.String()) if err != nil { t.Fatalf("%v", err) } - expReadiness2 := index.ReadinessProbeResponse_READY.String() - actReadiness2 := readiness2 - if expReadiness2 != actReadiness2 { - t.Fatalf("expected content to see %v, saw %v", expReadiness2, actReadiness2) + expLiveness3 := index.NodeHealthCheckResponse_ALIVE.String() + actLiveness3 := liveness3 + if expLiveness3 != actLiveness3 { + t.Fatalf("expected content to see %v, saw %v", expLiveness3, actLiveness3) } - // readiness check for server3 - readiness3, err := client3.ReadinessProbe() + // readiness + readiness3, err := client3.NodeHealthCheck(index.NodeHealthCheckRequest_READINESS.String()) if err != nil { t.Fatalf("%v", err) } - expReadiness3 := index.ReadinessProbeResponse_READY.String() + expReadiness3 := index.NodeHealthCheckResponse_READY.String() actReadiness3 := readiness3 if expReadiness3 != actReadiness3 { t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) @@ -1472,99 +1559,146 @@ func TestCluster_ReadinessProbe(t *testing.T) { func TestCluster_GetNode(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + managerGrpcAddress1 := "" + shardId1 := "" + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() + raftStorageType1 := "boltdb" + + node1 := &index.Node{ + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() - defer func() 
{ - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) + server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { - if server1 != nil { - server1.Stop() - } + server1.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) + managerGrpcAddress2 := "" + shardId2 := "" + peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() + raftStorageType2 := "boltdb" + + node2 := &index.Node{ + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { - if server2 != nil { - server2.Stop() - } + server2.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) + managerGrpcAddress3 := "" + shardId3 := "" + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() + raftStorageType3 := "boltdb" + + node3 := &index.Node{ + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { - if server3 != nil { - server3.Stop() - } + server3.Stop() }() if err != nil { t.Fatalf("%v", err) } - // 
start server3 + + // start server server3.Start() // sleep time.Sleep(5 * time.Second) // gRPC client for all servers - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) + client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) defer func() { _ = client3.Close() }() @@ -1573,220 +1707,204 @@ func TestCluster_GetNode(t *testing.T) { } // get all node info from all nodes - node11, err := client1.GetNode(nodeConfig1.NodeId) + node11, err := client1.NodeInfo() if err != nil { t.Fatalf("%v", err) } - expNode11 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), + expNode11 := &index.Node{ + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, } actNode11 := node11 if !reflect.DeepEqual(expNode11, actNode11) { t.Fatalf("expected content to see %v, saw %v", expNode11, actNode11) } - node12, err := client1.GetNode(nodeConfig2.NodeId) - if err != nil { - t.Fatalf("%v", err) - } - expNode12 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode12 := node12 - if !reflect.DeepEqual(expNode12, actNode12) { - t.Fatalf("expected content to see %v, saw %v", expNode12, actNode12) - } - - node13, err := client1.GetNode(nodeConfig3.NodeId) - if err != nil { - t.Fatalf("%v", err) - } - expNode13 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode13 := node13 - if !reflect.DeepEqual(expNode13, actNode13) { - t.Fatalf("expected content to see %v, saw %v", expNode13, actNode13) - } - - node21, err := client2.GetNode(nodeConfig1.NodeId) + node21, err := client2.NodeInfo() if err != nil { t.Fatalf("%v", err) } - expNode21 := map[string]interface{}{ - "node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), + expNode21 := &index.Node{ + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, } actNode21 := node21 if !reflect.DeepEqual(expNode21, actNode21) { t.Fatalf("expected content to see %v, saw %v", expNode21, actNode21) } - node22, err := client2.GetNode(nodeConfig2.NodeId) - if err != nil { - t.Fatalf("%v", err) - } - expNode22 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode22 := node22 - if !reflect.DeepEqual(expNode22, actNode22) { - t.Fatalf("expected content to see %v, saw %v", expNode22, actNode22) - } - - node23, err := client2.GetNode(nodeConfig3.NodeId) - if err != nil { - t.Fatalf("%v", err) - } - expNode23 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode23 := node23 - if !reflect.DeepEqual(expNode23, actNode23) { - t.Fatalf("expected content to see %v, saw %v", expNode23, actNode23) - } - - node31, err := client3.GetNode(nodeConfig1.NodeId) + node31, err := client3.NodeInfo() if err != nil { t.Fatalf("%v", err) } - expNode31 := map[string]interface{}{ - 
"node_config": server1.nodeConfig.ToMap(), - "state": raft.Leader.String(), + expNode31 := &index.Node{ + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, } actNode31 := node31 if !reflect.DeepEqual(expNode31, actNode31) { t.Fatalf("expected content to see %v, saw %v", expNode31, actNode31) } - - node32, err := client3.GetNode(nodeConfig2.NodeId) - if err != nil { - t.Fatalf("%v", err) - } - expNode32 := map[string]interface{}{ - "node_config": server2.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode32 := node32 - if !reflect.DeepEqual(expNode32, actNode32) { - t.Fatalf("expected content to see %v, saw %v", expNode32, actNode32) - } - - node33, err := client3.GetNode(nodeConfig3.NodeId) - if err != nil { - t.Fatalf("%v", err) - } - expNode33 := map[string]interface{}{ - "node_config": server3.nodeConfig.ToMap(), - "state": raft.Follower.String(), - } - actNode33 := node33 - if !reflect.DeepEqual(expNode33, actNode33) { - t.Fatalf("expected content to see %v, saw %v", expNode33, actNode33) - } } func TestCluster_GetCluster(t *testing.T) { curDir, _ := os.Getwd() - // create logger logger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create gRPC logger grpcLogger := logutils.NewLogger("WARN", "", 500, 3, 30, false) - - // create HTTP access logger httpAccessLogger := logutils.NewApacheCombinedLogger("", 500, 3, 30, false) - // create index config - indexConfig, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + managerGrpcAddress1 := "" + shardId1 := "" + peerGrpcAddress1 := "" + grpcAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() + raftStorageType1 := "boltdb" + + node1 := &index.Node{ + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + } + + indexConfig1, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") if err != nil { t.Fatalf("%v", err) } - // create configs for server1 - clusterConfig1 := config.DefaultClusterConfig() - nodeConfig1 := testutils.TmpNodeConfig() + server1, err := NewServer(managerGrpcAddress1, shardId1, peerGrpcAddress1, node1, dataDir1, raftStorageType1, indexConfig1, logger, grpcLogger, httpAccessLogger) defer func() { - _ = os.RemoveAll(nodeConfig1.DataDir) - }() - // create server1 - server1, err := NewServer(clusterConfig1, nodeConfig1, indexConfig, logger.Named("server1"), grpcLogger, httpAccessLogger) - defer func() { - if server1 != nil { - server1.Stop() - } + server1.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server1 + + // start server server1.Start() - // create configs for server2 - clusterConfig2 := config.DefaultClusterConfig() - clusterConfig2.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig2 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig2.DataDir) - }() - // create server2 - server2, err := NewServer(clusterConfig2, nodeConfig2, config.DefaultIndexConfig(), logger.Named("server2"), grpcLogger, httpAccessLogger) + managerGrpcAddress2 := "" + shardId2 := "" + 
peerGrpcAddress2 := grpcAddress1 + grpcAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() + raftStorageType2 := "boltdb" + + node2 := &index.Node{ + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + } + + indexConfig2, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server2, err := NewServer(managerGrpcAddress2, shardId2, peerGrpcAddress2, node2, dataDir2, raftStorageType2, indexConfig2, logger, grpcLogger, httpAccessLogger) defer func() { - if server2 != nil { - server2.Stop() - } + server2.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server2 + + // start server server2.Start() - // create configs for server3 - clusterConfig3 := config.DefaultClusterConfig() - clusterConfig3.PeerAddr = nodeConfig1.GRPCAddr - nodeConfig3 := testutils.TmpNodeConfig() - defer func() { - _ = os.RemoveAll(nodeConfig3.DataDir) - }() - // create server3 - server3, err := NewServer(clusterConfig3, nodeConfig3, config.DefaultIndexConfig(), logger.Named("server3"), grpcLogger, httpAccessLogger) + managerGrpcAddress3 := "" + shardId3 := "" + peerGrpcAddress3 := grpcAddress1 + grpcAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + httpAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) + bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) + dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() + raftStorageType3 := "boltdb" + + node3 := &index.Node{ + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_UNKNOWN, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + } + + indexConfig3, err := testutils.TmpIndexConfig(filepath.Join(curDir, "../example/wiki_index_mapping.json"), "upside_down", "boltdb") + if err != nil { + t.Fatalf("%v", err) + } + + server3, err := NewServer(managerGrpcAddress3, shardId3, peerGrpcAddress3, node3, dataDir3, raftStorageType3, indexConfig3, logger, grpcLogger, httpAccessLogger) defer func() { - if server3 != nil { - server3.Stop() - } + server3.Stop() }() if err != nil { t.Fatalf("%v", err) } - // start server3 + + // start server server3.Start() // sleep time.Sleep(5 * time.Second) - // gRPC client for manager1 - client1, err := NewGRPCClient(nodeConfig1.GRPCAddr) + // gRPC client for all servers + client1, err := NewGRPCClient(node1.Metadata.GrpcAddress) defer func() { _ = client1.Close() }() if err != nil { t.Fatalf("%v", err) } - client2, err := NewGRPCClient(nodeConfig2.GRPCAddr) + client2, err := NewGRPCClient(node2.Metadata.GrpcAddress) defer func() { _ = client2.Close() }() if err != nil { t.Fatalf("%v", err) } - client3, err := NewGRPCClient(nodeConfig3.GRPCAddr) + client3, err := NewGRPCClient(node3.Metadata.GrpcAddress) defer func() { _ = client3.Close() }() @@ -1794,23 +1912,40 @@ func TestCluster_GetCluster(t *testing.T) { t.Fatalf("%v", err) } - // get cluster info from all servers - cluster1, err := client1.GetCluster() - if err != nil { - t.Fatalf("%v", err) - } - expCluster1 := map[string]interface{}{ - nodeConfig1.NodeId: 
map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), + // get cluster info from manager1 + cluster1, err := client1.ClusterInfo() + if err != nil { + t.Fatalf("%v", err) + } + expCluster1 := &index.Cluster{ + Nodes: map[string]*index.Node{ + nodeId1: { + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + }, + nodeId2: { + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + }, + nodeId3: { + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + }, }, } actCluster1 := cluster1 @@ -1818,22 +1953,39 @@ func TestCluster_GetCluster(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expCluster1, actCluster1) } - cluster2, err := client2.GetCluster() - if err != nil { - t.Fatalf("%v", err) - } - expCluster2 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), + cluster2, err := client2.ClusterInfo() + if err != nil { + t.Fatalf("%v", err) + } + expCluster2 := &index.Cluster{ + Nodes: map[string]*index.Node{ + nodeId1: { + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + }, + nodeId2: { + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + }, + nodeId3: { + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + }, }, } actCluster2 := cluster2 @@ -1841,22 +1993,39 @@ func TestCluster_GetCluster(t *testing.T) { t.Fatalf("expected content to see %v, saw %v", expCluster2, actCluster2) } - cluster3, err := client3.GetCluster() - if err != nil { - t.Fatalf("%v", err) - } - expCluster3 := map[string]interface{}{ - nodeConfig1.NodeId: map[string]interface{}{ - "node_config": nodeConfig1.ToMap(), - "state": raft.Leader.String(), - }, - nodeConfig2.NodeId: map[string]interface{}{ - "node_config": nodeConfig2.ToMap(), - "state": raft.Follower.String(), - }, - nodeConfig3.NodeId: map[string]interface{}{ - "node_config": nodeConfig3.ToMap(), - "state": raft.Follower.String(), + cluster3, err := client3.ClusterInfo() + if err != nil { + t.Fatalf("%v", err) + } + expCluster3 := &index.Cluster{ + Nodes: map[string]*index.Node{ + nodeId1: { + Id: nodeId1, + BindAddress: bindAddress1, + State: index.Node_LEADER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress1, + HttpAddress: httpAddress1, + }, + }, + nodeId2: { + Id: nodeId2, + BindAddress: bindAddress2, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ 
+ GrpcAddress: grpcAddress2, + HttpAddress: httpAddress2, + }, + }, + nodeId3: { + Id: nodeId3, + BindAddress: bindAddress3, + State: index.Node_FOLLOWER, + Metadata: &index.Metadata{ + GrpcAddress: grpcAddress3, + HttpAddress: httpAddress3, + }, + }, }, } actCluster3 := cluster3 diff --git a/manager/grpc_client.go b/manager/grpc_client.go index af4c46e..6935724 100644 --- a/manager/grpc_client.go +++ b/manager/grpc_client.go @@ -45,6 +45,16 @@ func NewGRPCContext() (context.Context, context.CancelFunc) { func NewGRPCClient(address string) (*GRPCClient, error) { ctx, cancel := NewGRPCContext() + //streamRetryOpts := []grpc_retry.CallOption{ + // grpc_retry.Disable(), + //} + + //unaryRetryOpts := []grpc_retry.CallOption{ + // grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)), + // grpc_retry.WithCodes(codes.Unavailable), + // grpc_retry.WithMax(100), + //} + dialOpts := []grpc.DialOption{ grpc.WithInsecure(), grpc.WithDefaultCallOptions( diff --git a/manager/grpc_service.go b/manager/grpc_service.go index c763412..47b65a2 100644 --- a/manager/grpc_service.go +++ b/manager/grpc_service.go @@ -204,10 +204,12 @@ func (s *GRPCService) startUpdateCluster(checkInterval time.Duration) { n1, err := json.Marshal(node) if err != nil { s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", node)) + continue } n2, err := json.Marshal(nodeSnapshot) if err != nil { s.logger.Warn(err.Error(), zap.String("id", id), zap.Any("node", nodeSnapshot)) + continue } if !cmp.Equal(n1, n2) { // node updated @@ -632,8 +634,19 @@ func (s *GRPCService) Watch(req *management.WatchRequest, server management.Mana close(chans) }() + // normalize key + key := func(key string) string { + keys := make([]string, 0) + for _, k := range strings.Split(key, "/") { + if k != "" { + keys = append(keys, k) + } + } + return strings.Join(keys, "/") + }(req.Key) + for resp := range chans { - if !strings.HasPrefix(resp.Key, req.Key) { + if !strings.HasPrefix(resp.Key, key) { continue } err := server.Send(&resp) diff --git a/manager/raft_fsm.go b/manager/raft_fsm.go index 6eae3d5..325042d 100644 --- a/manager/raft_fsm.go +++ b/manager/raft_fsm.go @@ -47,7 +47,7 @@ func NewRaftFSM(path string, logger *zap.Logger) (*RaftFSM, error) { } func (f *RaftFSM) Start() error { - f.logger.Info("initialize metadata") + f.logger.Info("initialize cluster") f.cluster = &management.Cluster{Nodes: make(map[string]*management.Node, 0)} f.logger.Info("initialize store data") @@ -205,7 +205,7 @@ func (f *RaftFSM) Apply(l *raft.Log) interface{} { f.logger.Error(err.Error()) return &fsmResponse{error: err} } - err = f.SetValue(data["key"].(string), data["value"], true) + err = f.SetValue(data["key"].(string), data["value"], false) return &fsmResponse{error: err} case deleteKeyValue: var data map[string]interface{} diff --git a/manager/raft_server.go b/manager/raft_server.go index 257052f..76e3324 100644 --- a/manager/raft_server.go +++ b/manager/raft_server.go @@ -36,7 +36,6 @@ import ( ) type RaftServer struct { - //nodeId string node *management.Node dataDir string raftStorageType string @@ -51,7 +50,6 @@ type RaftServer struct { func NewRaftServer(node *management.Node, dataDir string, raftStorageType string, indexConfig *config.IndexConfig, bootstrap bool, logger *zap.Logger) (*RaftServer, error) { return &RaftServer{ - //nodeId: nodeId, node: node, dataDir: dataDir, raftStorageType: raftStorageType, @@ -79,11 +77,14 @@ func (s *RaftServer) Start() error { return err } - s.logger.Info("create Raft config", 
zap.String("node_id", s.node.Id)) + s.logger.Info("create Raft config", zap.String("id", s.node.Id)) raftConfig := raft.DefaultConfig() raftConfig.LocalID = raft.ServerID(s.node.Id) raftConfig.SnapshotThreshold = 1024 raftConfig.LogOutput = ioutil.Discard + //if s.bootstrap { + // raftConfig.StartAsLeader = true + //} s.logger.Info("resolve TCP address", zap.String("bind_addr", s.node.BindAddress)) addr, err := net.ResolveTCPAddr("tcp", s.node.BindAddress) diff --git a/manager/server_test.go b/manager/server_test.go index 1bf019b..b61bdae 100644 --- a/manager/server_test.go +++ b/manager/server_test.go @@ -42,6 +42,9 @@ func TestServer_Start(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -90,6 +93,9 @@ func TestServer_HealthCheck(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -185,6 +191,9 @@ func TestServer_GetNode(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -266,6 +275,9 @@ func TestServer_GetCluster(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -351,6 +363,9 @@ func TestServer_SetState(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -433,6 +448,9 @@ func TestServer_GetState(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -515,6 +533,9 @@ func TestServer_DeleteState(t *testing.T) { nodeId := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir) + }() raftStorageType := "boltdb" node := &management.Node{ @@ -618,6 +639,9 @@ func TestCluster_Start(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -655,6 +679,9 @@ func TestCluster_Start(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -692,6 +719,9 @@ func TestCluster_Start(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := 
testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ @@ -740,6 +770,9 @@ func TestCluster_HealthCheck(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -777,6 +810,9 @@ func TestCluster_HealthCheck(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -814,6 +850,9 @@ func TestCluster_HealthCheck(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ @@ -969,7 +1008,6 @@ func TestCluster_HealthCheck(t *testing.T) { if expReadiness3 != actReadiness3 { t.Fatalf("expected content to see %v, saw %v", expReadiness3, actReadiness3) } - } func TestCluster_GetNode(t *testing.T) { @@ -985,6 +1023,9 @@ func TestCluster_GetNode(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -1022,6 +1063,9 @@ func TestCluster_GetNode(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -1059,6 +1103,9 @@ func TestCluster_GetNode(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ @@ -1185,6 +1232,9 @@ func TestCluster_GetCluster(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -1222,6 +1272,9 @@ func TestCluster_GetCluster(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -1259,6 +1312,9 @@ func TestCluster_GetCluster(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ @@ -1451,6 +1507,9 @@ func TestCluster_SetState(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -1488,6 +1547,9 @@ func TestCluster_SetState(t 
*testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -1525,6 +1587,9 @@ func TestCluster_SetState(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ @@ -1701,6 +1766,9 @@ func TestCluster_GetState(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -1738,6 +1806,9 @@ func TestCluster_GetState(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -1775,6 +1846,9 @@ func TestCluster_GetState(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ @@ -1951,6 +2025,9 @@ func TestCluster_DeleteState(t *testing.T) { nodeId1 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress1 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir1 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir1) + }() raftStorageType1 := "boltdb" node1 := &management.Node{ @@ -1988,6 +2065,9 @@ func TestCluster_DeleteState(t *testing.T) { nodeId2 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress2 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir2 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir2) + }() raftStorageType2 := "boltdb" node2 := &management.Node{ @@ -2025,6 +2105,9 @@ func TestCluster_DeleteState(t *testing.T) { nodeId3 := fmt.Sprintf("node-%s", strutils.RandStr(5)) bindAddress3 := fmt.Sprintf(":%d", testutils.TmpPort()) dataDir3 := testutils.TmpDir() + defer func() { + _ = os.RemoveAll(dataDir3) + }() raftStorageType3 := "boltdb" node3 := &management.Node{ diff --git a/protobuf/distribute/distribute.pb.go b/protobuf/distribute/distribute.pb.go index b935dea..9a8174f 100644 --- a/protobuf/distribute/distribute.pb.go +++ b/protobuf/distribute/distribute.pb.go @@ -8,7 +8,6 @@ import ( fmt "fmt" proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" - empty "github.com/golang/protobuf/ptypes/empty" grpc "google.golang.org/grpc" math "math" ) @@ -24,140 +23,147 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -type LivenessProbeResponse_State int32 +type NodeHealthCheckRequest_Probe int32 const ( - LivenessProbeResponse_UNKNOWN LivenessProbeResponse_State = 0 - LivenessProbeResponse_ALIVE LivenessProbeResponse_State = 1 - LivenessProbeResponse_DEAD LivenessProbeResponse_State = 2 + NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 0 + NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 1 + NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 2 ) -var LivenessProbeResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ALIVE", - 2: "DEAD", +var NodeHealthCheckRequest_Probe_name = map[int32]string{ + 0: "HEALTHINESS", + 1: "LIVENESS", + 2: "READINESS", } -var LivenessProbeResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "ALIVE": 1, - "DEAD": 2, +var NodeHealthCheckRequest_Probe_value = map[string]int32{ + "HEALTHINESS": 0, + "LIVENESS": 1, + "READINESS": 2, } -func (x LivenessProbeResponse_State) String() string { - return proto.EnumName(LivenessProbeResponse_State_name, int32(x)) +func (x NodeHealthCheckRequest_Probe) String() string { + return proto.EnumName(NodeHealthCheckRequest_Probe_name, int32(x)) } -func (LivenessProbeResponse_State) EnumDescriptor() ([]byte, []int) { +func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{0, 0} } -type ReadinessProbeResponse_State int32 +type NodeHealthCheckResponse_State int32 const ( - ReadinessProbeResponse_UNKNOWN ReadinessProbeResponse_State = 0 - ReadinessProbeResponse_READY ReadinessProbeResponse_State = 1 - ReadinessProbeResponse_NOT_READY ReadinessProbeResponse_State = 2 + NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 0 + NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 1 + NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 2 + NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 3 + NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 4 + NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 5 ) -var ReadinessProbeResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "READY", - 2: "NOT_READY", +var NodeHealthCheckResponse_State_name = map[int32]string{ + 0: "HEALTHY", + 1: "UNHEALTHY", + 2: "ALIVE", + 3: "DEAD", + 4: "READY", + 5: "NOT_READY", } -var ReadinessProbeResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "READY": 1, - "NOT_READY": 2, +var NodeHealthCheckResponse_State_value = map[string]int32{ + "HEALTHY": 0, + "UNHEALTHY": 1, + "ALIVE": 2, + "DEAD": 3, + "READY": 4, + "NOT_READY": 5, } -func (x ReadinessProbeResponse_State) String() string { - return proto.EnumName(ReadinessProbeResponse_State_name, int32(x)) +func (x NodeHealthCheckResponse_State) String() string { + return proto.EnumName(NodeHealthCheckResponse_State_name, int32(x)) } -func (ReadinessProbeResponse_State) EnumDescriptor() ([]byte, []int) { +func (NodeHealthCheckResponse_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{1, 0} } -// use for health check -type LivenessProbeResponse struct { - State LivenessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=distribute.LivenessProbeResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type NodeHealthCheckRequest struct { + Probe NodeHealthCheckRequest_Probe 
`protobuf:"varint,1,opt,name=probe,proto3,enum=distribute.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *LivenessProbeResponse) Reset() { *m = LivenessProbeResponse{} } -func (m *LivenessProbeResponse) String() string { return proto.CompactTextString(m) } -func (*LivenessProbeResponse) ProtoMessage() {} -func (*LivenessProbeResponse) Descriptor() ([]byte, []int) { +func (m *NodeHealthCheckRequest) Reset() { *m = NodeHealthCheckRequest{} } +func (m *NodeHealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*NodeHealthCheckRequest) ProtoMessage() {} +func (*NodeHealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{0} } -func (m *LivenessProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LivenessProbeResponse.Unmarshal(m, b) +func (m *NodeHealthCheckRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeHealthCheckRequest.Unmarshal(m, b) } -func (m *LivenessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LivenessProbeResponse.Marshal(b, m, deterministic) +func (m *NodeHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeHealthCheckRequest.Marshal(b, m, deterministic) } -func (m *LivenessProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LivenessProbeResponse.Merge(m, src) +func (m *NodeHealthCheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeHealthCheckRequest.Merge(m, src) } -func (m *LivenessProbeResponse) XXX_Size() int { - return xxx_messageInfo_LivenessProbeResponse.Size(m) +func (m *NodeHealthCheckRequest) XXX_Size() int { + return xxx_messageInfo_NodeHealthCheckRequest.Size(m) } -func (m *LivenessProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LivenessProbeResponse.DiscardUnknown(m) +func (m *NodeHealthCheckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeHealthCheckRequest.DiscardUnknown(m) } -var xxx_messageInfo_LivenessProbeResponse proto.InternalMessageInfo +var xxx_messageInfo_NodeHealthCheckRequest proto.InternalMessageInfo -func (m *LivenessProbeResponse) GetState() LivenessProbeResponse_State { +func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { if m != nil { - return m.State + return m.Probe } - return LivenessProbeResponse_UNKNOWN + return NodeHealthCheckRequest_HEALTHINESS } -// use for health check -type ReadinessProbeResponse struct { - State ReadinessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=distribute.ReadinessProbeResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type NodeHealthCheckResponse struct { + State NodeHealthCheckResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=distribute.NodeHealthCheckResponse_State" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ReadinessProbeResponse) Reset() { *m = ReadinessProbeResponse{} } -func (m *ReadinessProbeResponse) String() string { return proto.CompactTextString(m) } -func (*ReadinessProbeResponse) ProtoMessage() {} -func (*ReadinessProbeResponse) Descriptor() ([]byte, []int) { +func (m *NodeHealthCheckResponse) Reset() { *m = NodeHealthCheckResponse{} } +func (m 
*NodeHealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*NodeHealthCheckResponse) ProtoMessage() {} +func (*NodeHealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor_0b1b3e8a99d31c9c, []int{1} } -func (m *ReadinessProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadinessProbeResponse.Unmarshal(m, b) +func (m *NodeHealthCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeHealthCheckResponse.Unmarshal(m, b) } -func (m *ReadinessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadinessProbeResponse.Marshal(b, m, deterministic) +func (m *NodeHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeHealthCheckResponse.Marshal(b, m, deterministic) } -func (m *ReadinessProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadinessProbeResponse.Merge(m, src) +func (m *NodeHealthCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeHealthCheckResponse.Merge(m, src) } -func (m *ReadinessProbeResponse) XXX_Size() int { - return xxx_messageInfo_ReadinessProbeResponse.Size(m) +func (m *NodeHealthCheckResponse) XXX_Size() int { + return xxx_messageInfo_NodeHealthCheckResponse.Size(m) } -func (m *ReadinessProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadinessProbeResponse.DiscardUnknown(m) +func (m *NodeHealthCheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeHealthCheckResponse.DiscardUnknown(m) } -var xxx_messageInfo_ReadinessProbeResponse proto.InternalMessageInfo +var xxx_messageInfo_NodeHealthCheckResponse proto.InternalMessageInfo -func (m *ReadinessProbeResponse) GetState() ReadinessProbeResponse_State { +func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { if m != nil { return m.State } - return ReadinessProbeResponse_UNKNOWN + return NodeHealthCheckResponse_HEALTHY } type GetDocumentRequest struct { @@ -481,10 +487,10 @@ func (m *SearchResponse) GetSearchResult() *any.Any { } func init() { - proto.RegisterEnum("distribute.LivenessProbeResponse_State", LivenessProbeResponse_State_name, LivenessProbeResponse_State_value) - proto.RegisterEnum("distribute.ReadinessProbeResponse_State", ReadinessProbeResponse_State_name, ReadinessProbeResponse_State_value) - proto.RegisterType((*LivenessProbeResponse)(nil), "distribute.LivenessProbeResponse") - proto.RegisterType((*ReadinessProbeResponse)(nil), "distribute.ReadinessProbeResponse") + proto.RegisterEnum("distribute.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) + proto.RegisterEnum("distribute.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) + proto.RegisterType((*NodeHealthCheckRequest)(nil), "distribute.NodeHealthCheckRequest") + proto.RegisterType((*NodeHealthCheckResponse)(nil), "distribute.NodeHealthCheckResponse") proto.RegisterType((*GetDocumentRequest)(nil), "distribute.GetDocumentRequest") proto.RegisterType((*GetDocumentResponse)(nil), "distribute.GetDocumentResponse") proto.RegisterType((*IndexDocumentRequest)(nil), "distribute.IndexDocumentRequest") @@ -500,40 +506,42 @@ func init() { } var fileDescriptor_0b1b3e8a99d31c9c = []byte{ - // 528 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xdd, 0x8f, 0xd2, 0x4c, - 0x14, 0xc6, 0x29, 0xef, 0x0b, 0xca, 0xc1, 0x36, 0x64, 0x04, 0xe2, 0xd6, 0x44, 0xd7, 
0xc9, 0x26, - 0x8b, 0xd1, 0x2d, 0x09, 0x5e, 0x19, 0xa3, 0x09, 0x5a, 0x62, 0x36, 0x4b, 0xba, 0x9b, 0xee, 0xfa, - 0x7d, 0xb1, 0x69, 0xe9, 0x59, 0xb6, 0xb1, 0x74, 0xb0, 0x33, 0x35, 0xee, 0xa5, 0x77, 0xfe, 0xc5, - 0x5e, 0x1b, 0xfa, 0x81, 0x1d, 0x28, 0xe0, 0x1d, 0x73, 0xce, 0x79, 0x7e, 0xf3, 0xf4, 0xe4, 0x19, - 0xe0, 0x60, 0x1e, 0x31, 0xc1, 0xdc, 0xf8, 0xaa, 0xef, 0xf9, 0x5c, 0x44, 0xbe, 0x1b, 0x0b, 0x2c, - 0xfc, 0x34, 0x92, 0x36, 0x81, 0xbf, 0x15, 0x7d, 0x6f, 0xca, 0xd8, 0x34, 0xc0, 0xfe, 0x52, 0xe8, - 0x84, 0x37, 0xe9, 0x98, 0x7e, 0x7f, 0xb5, 0x85, 0xb3, 0xb9, 0xc8, 0x9a, 0xf4, 0xa7, 0x02, 0x9d, - 0xb1, 0xff, 0x1d, 0x43, 0xe4, 0xfc, 0x2c, 0x62, 0x2e, 0xda, 0xc8, 0xe7, 0x2c, 0xe4, 0x48, 0x5e, - 0x42, 0x8d, 0x0b, 0x47, 0xe0, 0x3d, 0x65, 0x5f, 0xe9, 0x69, 0x83, 0x43, 0xa3, 0x70, 0x7f, 0xa9, - 0xc2, 0x38, 0x5f, 0x8c, 0xdb, 0xa9, 0x8a, 0x3e, 0x86, 0x5a, 0x72, 0x26, 0x4d, 0xb8, 0xf5, 0xce, - 0x3a, 0xb1, 0x4e, 0x3f, 0x58, 0xad, 0x0a, 0x69, 0x40, 0x6d, 0x38, 0x3e, 0x7e, 0x3f, 0x6a, 0x29, - 0xe4, 0x36, 0xfc, 0x6f, 0x8e, 0x86, 0x66, 0xab, 0x4a, 0x7f, 0x29, 0xd0, 0xb5, 0xd1, 0xf1, 0xfc, - 0x75, 0x13, 0xaf, 0x64, 0x13, 0xbd, 0xa2, 0x89, 0x72, 0x89, 0xec, 0xc2, 0xd8, 0xe4, 0xc2, 0x1e, - 0x0d, 0xcd, 0x4f, 0x2d, 0x85, 0xa8, 0xd0, 0xb0, 0x4e, 0x2f, 0x2e, 0xd3, 0x63, 0x95, 0x1e, 0x00, - 0x79, 0x8b, 0xc2, 0x64, 0x93, 0x78, 0x86, 0xa1, 0xb0, 0xf1, 0x5b, 0x8c, 0x5c, 0x10, 0x0d, 0xaa, - 0xbe, 0x97, 0x58, 0x68, 0xd8, 0x55, 0xdf, 0xa3, 0x6f, 0xe0, 0xae, 0x34, 0x95, 0x99, 0x7d, 0x0a, - 0xf5, 0x2b, 0x1f, 0x03, 0x8f, 0x27, 0xa3, 0xcd, 0x41, 0xdb, 0x48, 0x37, 0x6f, 0xe4, 0x9b, 0x37, - 0x86, 0xe1, 0x8d, 0x9d, 0xcd, 0xd0, 0x0b, 0x68, 0x1f, 0x87, 0x1e, 0xfe, 0xd8, 0x71, 0x59, 0x81, - 0x5a, 0xfd, 0x07, 0xea, 0x11, 0x74, 0x56, 0xa8, 0x99, 0xb9, 0x36, 0xd4, 0x26, 0x2c, 0x0e, 0x45, - 0x42, 0xae, 0xd9, 0xe9, 0x81, 0x1e, 0x42, 0xc7, 0xc4, 0x00, 0x05, 0xee, 0xfa, 0x64, 0x03, 0xba, - 0xab, 0x83, 0x5b, 0xc1, 0x63, 0x50, 0xcf, 0xd1, 0x89, 0x26, 0xd7, 0x39, 0xf0, 0x05, 0x68, 0x3c, - 0x29, 0x5c, 0x46, 0x69, 0x65, 0xeb, 0x92, 0x54, 0x5e, 0x14, 0xd3, 0x13, 0xd0, 0x72, 0x5a, 0x76, - 0xeb, 0x73, 0x50, 0x97, 0x38, 0x1e, 0x07, 0xdb, 0x69, 0x77, 0x72, 0xda, 0x62, 0x72, 0xf0, 0xfb, - 0x3f, 0x00, 0x73, 0x19, 0x23, 0x32, 0x06, 0x55, 0x8a, 0x33, 0xe9, 0xae, 0x31, 0x46, 0x8b, 0x07, - 0xa3, 0x3f, 0xda, 0xf9, 0x02, 0x68, 0x85, 0x58, 0xa0, 0xc9, 0xb9, 0xdc, 0x88, 0xa3, 0xbb, 0xb3, - 0x4c, 0x2b, 0xe4, 0x0c, 0x9a, 0x85, 0xa8, 0x91, 0x07, 0x45, 0xd1, 0x7a, 0x52, 0xf5, 0x87, 0x1b, - 0xfb, 0x4b, 0xe2, 0x47, 0x50, 0xa5, 0x84, 0x90, 0xfd, 0xa2, 0xa6, 0x2c, 0x92, 0xf2, 0x97, 0x97, - 0xc6, 0x8b, 0x56, 0x7a, 0x0a, 0xf9, 0x02, 0x9a, 0x9c, 0x11, 0x22, 0x09, 0x4b, 0x83, 0x26, 0xaf, - 0xa1, 0x3c, 0x62, 0x09, 0x7c, 0x08, 0xf5, 0x34, 0x02, 0x64, 0xaf, 0xa8, 0x90, 0x42, 0xa6, 0xeb, - 0x65, 0xad, 0x1c, 0xf2, 0xfa, 0xe8, 0xf3, 0x93, 0xa9, 0x2f, 0xae, 0x63, 0xd7, 0x98, 0xb0, 0x59, - 0x7f, 0xc6, 0x78, 0xfc, 0xd5, 0xe9, 0xbb, 0x81, 0xc3, 0x45, 0xbf, 0xe4, 0xff, 0xd6, 0xad, 0x27, - 0xc5, 0x67, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf3, 0xf4, 0x4b, 0x0a, 0x8d, 0x05, 0x00, 0x00, + // 547 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xef, 0x6f, 0xd2, 0x40, + 0x18, 0xa6, 0x6c, 0xc5, 0xf1, 0x32, 0x18, 0x39, 0xd9, 0x74, 0xfd, 0xa0, 0xf3, 0x5c, 0x22, 0x46, + 0x57, 0x12, 0x8c, 0x1f, 0x8c, 0x89, 0xa6, 0xae, 0x44, 0x88, 0x04, 0x97, 0x82, 0xc6, 0xa9, 0xc9, + 0xd2, 0x1f, 0x37, 0x68, 0x56, 0x7a, 0xd8, 0xbb, 0x26, 0xee, 0xaf, 0xf0, 0x3f, 0xf1, 0xa3, 0x7f, + 0x9f, 0x69, 0x8f, 0x62, 0xcb, 0x6a, 0xd9, 0x37, 0xde, 0xf7, 
0x7d, 0x9e, 0xe7, 0x9e, 0xbb, 0xf7, + 0xa1, 0x70, 0xbc, 0x08, 0x28, 0xa7, 0x56, 0x78, 0xd9, 0x71, 0x5c, 0xc6, 0x03, 0xd7, 0x0a, 0x39, + 0x49, 0xfd, 0x54, 0xe3, 0x31, 0x82, 0x7f, 0x1d, 0xe5, 0x70, 0x4a, 0xe9, 0xd4, 0x23, 0x9d, 0x15, + 0xd1, 0xf4, 0xaf, 0x05, 0x0c, 0xff, 0x92, 0xe0, 0x60, 0x44, 0x1d, 0xd2, 0x27, 0xa6, 0xc7, 0x67, + 0xa7, 0x33, 0x62, 0x5f, 0x19, 0xe4, 0x47, 0x48, 0x18, 0x47, 0x6f, 0x40, 0x5e, 0x04, 0xd4, 0x22, + 0xf7, 0xa5, 0x23, 0xa9, 0xdd, 0xe8, 0xb6, 0xd5, 0xd4, 0x19, 0xf9, 0x14, 0xf5, 0x2c, 0xc2, 0x1b, + 0x82, 0x86, 0x5f, 0x82, 0x1c, 0xd7, 0x68, 0x0f, 0x6a, 0xfd, 0x9e, 0x36, 0x9c, 0xf4, 0x07, 0xa3, + 0xde, 0x78, 0xdc, 0x2c, 0xa1, 0x5d, 0xd8, 0x19, 0x0e, 0x3e, 0xf7, 0xe2, 0x4a, 0x42, 0x75, 0xa8, + 0x1a, 0x3d, 0x4d, 0x17, 0xc3, 0x32, 0xfe, 0x2d, 0xc1, 0xbd, 0x1b, 0xf2, 0x6c, 0x41, 0x7d, 0x46, + 0xd0, 0x5b, 0x90, 0x19, 0x37, 0x79, 0x62, 0xe9, 0x69, 0xa1, 0x25, 0xc1, 0x51, 0xc7, 0x11, 0xc1, + 0x10, 0x3c, 0x6c, 0x80, 0x1c, 0xd7, 0xa8, 0x06, 0x77, 0x84, 0xa7, 0xf3, 0x66, 0x29, 0x72, 0xf0, + 0x69, 0x94, 0x94, 0x12, 0xaa, 0x82, 0xac, 0x45, 0xfe, 0x9a, 0x65, 0xb4, 0x03, 0xdb, 0x7a, 0x4f, + 0xd3, 0x9b, 0x5b, 0x51, 0x33, 0x72, 0x79, 0xde, 0xdc, 0x8e, 0xe0, 0xa3, 0x8f, 0x93, 0x0b, 0x51, + 0xca, 0xf8, 0x18, 0xd0, 0x7b, 0xc2, 0x75, 0x6a, 0x87, 0x73, 0xe2, 0xf3, 0xe4, 0xf5, 0x1a, 0x50, + 0x76, 0x9d, 0xd8, 0x67, 0xd5, 0x28, 0xbb, 0x0e, 0x3e, 0x85, 0xbb, 0x19, 0xd4, 0xf2, 0x46, 0xcf, + 0xa1, 0x72, 0xe9, 0x12, 0xcf, 0x61, 0x31, 0xb4, 0xd6, 0x6d, 0xa9, 0x62, 0x57, 0x6a, 0xb2, 0x2b, + 0x55, 0xf3, 0xaf, 0x8d, 0x25, 0x06, 0x4f, 0xa0, 0x35, 0xf0, 0x1d, 0xf2, 0x73, 0xc3, 0x61, 0x29, + 0xd5, 0xf2, 0x2d, 0x54, 0x4f, 0x60, 0x7f, 0x4d, 0x75, 0x69, 0xae, 0x05, 0xb2, 0x4d, 0x43, 0x9f, + 0xc7, 0xca, 0xb2, 0x21, 0x0a, 0xfc, 0x04, 0xf6, 0x75, 0xe2, 0x11, 0x4e, 0x36, 0x5d, 0x59, 0x85, + 0x83, 0x75, 0x60, 0xa1, 0xf0, 0x10, 0xea, 0x63, 0x62, 0x06, 0xf6, 0x2c, 0x11, 0x7c, 0x0d, 0x0d, + 0x16, 0x37, 0x2e, 0x02, 0xd1, 0x29, 0x7c, 0xa4, 0x3a, 0x4b, 0x93, 0xf1, 0x07, 0x68, 0x24, 0x6a, + 0xcb, 0x53, 0x5f, 0x41, 0x7d, 0x25, 0xc7, 0x42, 0xaf, 0x58, 0x6d, 0x37, 0x51, 0x8b, 0x90, 0xdd, + 0x3f, 0x5b, 0x00, 0xfa, 0x2a, 0x6b, 0xe8, 0x3b, 0xec, 0xad, 0xc5, 0x0d, 0xe1, 0xcd, 0x7f, 0x0f, + 0xe5, 0xf1, 0x2d, 0xf2, 0x8a, 0x4b, 0xe8, 0x0c, 0x6a, 0xa9, 0xa8, 0xa0, 0x07, 0x69, 0xd6, 0xcd, + 0xa4, 0x29, 0x0f, 0xff, 0x3b, 0x5f, 0x29, 0x7e, 0x81, 0x7a, 0x66, 0xc3, 0xe8, 0x28, 0xcd, 0xc9, + 0x8b, 0x94, 0xf2, 0xa8, 0x00, 0x91, 0xe8, 0xb6, 0x25, 0xf4, 0x0d, 0x1a, 0xd9, 0x1d, 0xa3, 0x0c, + 0x31, 0x37, 0x28, 0x0a, 0x2e, 0x82, 0xa4, 0xc4, 0x35, 0xa8, 0x88, 0x15, 0xa2, 0xc3, 0x34, 0x23, + 0x13, 0x12, 0x45, 0xc9, 0x1b, 0x25, 0x22, 0xef, 0x4e, 0xbe, 0x3e, 0x9b, 0xba, 0x7c, 0x16, 0x5a, + 0xaa, 0x4d, 0xe7, 0x9d, 0x39, 0x65, 0xe1, 0x95, 0xd9, 0xb1, 0x3c, 0x93, 0xf1, 0x4e, 0xce, 0x67, + 0xd4, 0xaa, 0xc4, 0xcd, 0x17, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x35, 0xcf, 0xa0, 0x84, 0x64, + 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -548,8 +556,7 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type DistributeClient interface { - LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) - ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) + NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_IndexDocumentClient, error) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Distribute_DeleteDocumentClient, error) @@ -564,18 +571,9 @@ func NewDistributeClient(cc *grpc.ClientConn) DistributeClient { return &distributeClient{cc} } -func (c *distributeClient) LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) { - out := new(LivenessProbeResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/LivenessProbe", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *distributeClient) ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) { - out := new(ReadinessProbeResponse) - err := c.cc.Invoke(ctx, "/distribute.Distribute/ReadinessProbe", in, out, opts...) +func (c *distributeClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) { + out := new(NodeHealthCheckResponse) + err := c.cc.Invoke(ctx, "/distribute.Distribute/NodeHealthCheck", in, out, opts...) if err != nil { return nil, err } @@ -670,8 +668,7 @@ func (c *distributeClient) Search(ctx context.Context, in *SearchRequest, opts . // DistributeServer is the server API for Distribute service. 
type DistributeServer interface { - LivenessProbe(context.Context, *empty.Empty) (*LivenessProbeResponse, error) - ReadinessProbe(context.Context, *empty.Empty) (*ReadinessProbeResponse, error) + NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) GetDocument(context.Context, *GetDocumentRequest) (*GetDocumentResponse, error) IndexDocument(Distribute_IndexDocumentServer) error DeleteDocument(Distribute_DeleteDocumentServer) error @@ -682,38 +679,20 @@ func RegisterDistributeServer(s *grpc.Server, srv DistributeServer) { s.RegisterService(&_Distribute_serviceDesc, srv) } -func _Distribute_LivenessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DistributeServer).LivenessProbe(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/distribute.Distribute/LivenessProbe", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).LivenessProbe(ctx, req.(*empty.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Distribute_ReadinessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) +func _Distribute_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeHealthCheckRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(DistributeServer).ReadinessProbe(ctx, in) + return srv.(DistributeServer).NodeHealthCheck(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/distribute.Distribute/ReadinessProbe", + FullMethod: "/distribute.Distribute/NodeHealthCheck", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributeServer).ReadinessProbe(ctx, req.(*empty.Empty)) + return srv.(DistributeServer).NodeHealthCheck(ctx, req.(*NodeHealthCheckRequest)) } return interceptor(ctx, in, info, handler) } @@ -811,12 +790,8 @@ var _Distribute_serviceDesc = grpc.ServiceDesc{ HandlerType: (*DistributeServer)(nil), Methods: []grpc.MethodDesc{ { - MethodName: "LivenessProbe", - Handler: _Distribute_LivenessProbe_Handler, - }, - { - MethodName: "ReadinessProbe", - Handler: _Distribute_ReadinessProbe_Handler, + MethodName: "NodeHealthCheck", + Handler: _Distribute_NodeHealthCheck_Handler, }, { MethodName: "GetDocument", diff --git a/protobuf/distribute/distribute.proto b/protobuf/distribute/distribute.proto index c2c6f20..7f08a56 100644 --- a/protobuf/distribute/distribute.proto +++ b/protobuf/distribute/distribute.proto @@ -15,15 +15,13 @@ syntax = "proto3"; import "google/protobuf/any.proto"; -import "google/protobuf/empty.proto"; package distribute; option go_package = "github.com/mosuka/blast/protobuf/distribute"; service Distribute { - rpc LivenessProbe (google.protobuf.Empty) returns (LivenessProbeResponse) {} - rpc ReadinessProbe (google.protobuf.Empty) returns (ReadinessProbeResponse) {} + rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) {} rpc GetDocument (GetDocumentRequest) returns (GetDocumentResponse) {} rpc IndexDocument (stream IndexDocumentRequest) returns (IndexDocumentResponse) {} @@ -31,22 +29,23 
@@ service Distribute { rpc Search (SearchRequest) returns (SearchResponse) {} } -// use for health check -message LivenessProbeResponse { - enum State { - UNKNOWN = 0; - ALIVE = 1; - DEAD = 2; +message NodeHealthCheckRequest { + enum Probe { + HEALTHINESS = 0; + LIVENESS = 1; + READINESS = 2; } - State state = 1; + Probe probe = 1; } -// use for health check -message ReadinessProbeResponse { +message NodeHealthCheckResponse { enum State { - UNKNOWN = 0; - READY = 1; - NOT_READY = 2; + HEALTHY = 0; + UNHEALTHY = 1; + ALIVE = 2; + DEAD = 3; + READY = 4; + NOT_READY = 5; } State state = 1; } diff --git a/protobuf/index/index.pb.go b/protobuf/index/index.pb.go index e0e0dcf..8b42e8c 100644 --- a/protobuf/index/index.pb.go +++ b/protobuf/index/index.pb.go @@ -24,352 +24,568 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -type LivenessProbeResponse_State int32 +type NodeHealthCheckRequest_Probe int32 const ( - LivenessProbeResponse_UNKNOWN LivenessProbeResponse_State = 0 - LivenessProbeResponse_ALIVE LivenessProbeResponse_State = 1 - LivenessProbeResponse_DEAD LivenessProbeResponse_State = 2 + NodeHealthCheckRequest_HEALTHINESS NodeHealthCheckRequest_Probe = 0 + NodeHealthCheckRequest_LIVENESS NodeHealthCheckRequest_Probe = 1 + NodeHealthCheckRequest_READINESS NodeHealthCheckRequest_Probe = 2 ) -var LivenessProbeResponse_State_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ALIVE", - 2: "DEAD", +var NodeHealthCheckRequest_Probe_name = map[int32]string{ + 0: "HEALTHINESS", + 1: "LIVENESS", + 2: "READINESS", } -var LivenessProbeResponse_State_value = map[string]int32{ - "UNKNOWN": 0, - "ALIVE": 1, - "DEAD": 2, +var NodeHealthCheckRequest_Probe_value = map[string]int32{ + "HEALTHINESS": 0, + "LIVENESS": 1, + "READINESS": 2, } -func (x LivenessProbeResponse_State) String() string { - return proto.EnumName(LivenessProbeResponse_State_name, int32(x)) +func (x NodeHealthCheckRequest_Probe) String() string { + return proto.EnumName(NodeHealthCheckRequest_Probe_name, int32(x)) } -func (LivenessProbeResponse_State) EnumDescriptor() ([]byte, []int) { +func (NodeHealthCheckRequest_Probe) EnumDescriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{0, 0} } -type ReadinessProbeResponse_State int32 +type NodeHealthCheckResponse_State int32 + +const ( + NodeHealthCheckResponse_HEALTHY NodeHealthCheckResponse_State = 0 + NodeHealthCheckResponse_UNHEALTHY NodeHealthCheckResponse_State = 1 + NodeHealthCheckResponse_ALIVE NodeHealthCheckResponse_State = 2 + NodeHealthCheckResponse_DEAD NodeHealthCheckResponse_State = 3 + NodeHealthCheckResponse_READY NodeHealthCheckResponse_State = 4 + NodeHealthCheckResponse_NOT_READY NodeHealthCheckResponse_State = 5 +) + +var NodeHealthCheckResponse_State_name = map[int32]string{ + 0: "HEALTHY", + 1: "UNHEALTHY", + 2: "ALIVE", + 3: "DEAD", + 4: "READY", + 5: "NOT_READY", +} + +var NodeHealthCheckResponse_State_value = map[string]int32{ + "HEALTHY": 0, + "UNHEALTHY": 1, + "ALIVE": 2, + "DEAD": 3, + "READY": 4, + "NOT_READY": 5, +} + +func (x NodeHealthCheckResponse_State) String() string { + return proto.EnumName(NodeHealthCheckResponse_State_name, int32(x)) +} + +func (NodeHealthCheckResponse_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{1, 0} +} + +type Node_State int32 const ( - ReadinessProbeResponse_UNKNOWN ReadinessProbeResponse_State = 0 - ReadinessProbeResponse_READY ReadinessProbeResponse_State = 1 - 
ReadinessProbeResponse_NOT_READY ReadinessProbeResponse_State = 2 + Node_UNKNOWN Node_State = 0 + Node_FOLLOWER Node_State = 1 + Node_CANDIDATE Node_State = 2 + Node_LEADER Node_State = 3 + Node_SHUTDOWN Node_State = 4 ) -var ReadinessProbeResponse_State_name = map[int32]string{ +var Node_State_name = map[int32]string{ 0: "UNKNOWN", - 1: "READY", - 2: "NOT_READY", + 1: "FOLLOWER", + 2: "CANDIDATE", + 3: "LEADER", + 4: "SHUTDOWN", } -var ReadinessProbeResponse_State_value = map[string]int32{ +var Node_State_value = map[string]int32{ "UNKNOWN": 0, - "READY": 1, - "NOT_READY": 2, + "FOLLOWER": 1, + "CANDIDATE": 2, + "LEADER": 3, + "SHUTDOWN": 4, } -func (x ReadinessProbeResponse_State) String() string { - return proto.EnumName(ReadinessProbeResponse_State_name, int32(x)) +func (x Node_State) String() string { + return proto.EnumName(Node_State_name, int32(x)) } -func (ReadinessProbeResponse_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{1, 0} +func (Node_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{3, 0} } -// use for health check -type LivenessProbeResponse struct { - State LivenessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=index.LivenessProbeResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type ClusterWatchResponse_Event int32 + +const ( + ClusterWatchResponse_UNKNOWN ClusterWatchResponse_Event = 0 + ClusterWatchResponse_JOIN ClusterWatchResponse_Event = 1 + ClusterWatchResponse_LEAVE ClusterWatchResponse_Event = 2 + ClusterWatchResponse_UPDATE ClusterWatchResponse_Event = 3 +) + +var ClusterWatchResponse_Event_name = map[int32]string{ + 0: "UNKNOWN", + 1: "JOIN", + 2: "LEAVE", + 3: "UPDATE", } -func (m *LivenessProbeResponse) Reset() { *m = LivenessProbeResponse{} } -func (m *LivenessProbeResponse) String() string { return proto.CompactTextString(m) } -func (*LivenessProbeResponse) ProtoMessage() {} -func (*LivenessProbeResponse) Descriptor() ([]byte, []int) { +var ClusterWatchResponse_Event_value = map[string]int32{ + "UNKNOWN": 0, + "JOIN": 1, + "LEAVE": 2, + "UPDATE": 3, +} + +func (x ClusterWatchResponse_Event) String() string { + return proto.EnumName(ClusterWatchResponse_Event_name, int32(x)) +} + +func (ClusterWatchResponse_Event) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{9, 0} +} + +type NodeHealthCheckRequest struct { + Probe NodeHealthCheckRequest_Probe `protobuf:"varint,1,opt,name=probe,proto3,enum=index.NodeHealthCheckRequest_Probe" json:"probe,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeHealthCheckRequest) Reset() { *m = NodeHealthCheckRequest{} } +func (m *NodeHealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*NodeHealthCheckRequest) ProtoMessage() {} +func (*NodeHealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{0} } -func (m *LivenessProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LivenessProbeResponse.Unmarshal(m, b) +func (m *NodeHealthCheckRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeHealthCheckRequest.Unmarshal(m, b) } -func (m *LivenessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LivenessProbeResponse.Marshal(b, m, deterministic) +func (m 
*NodeHealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeHealthCheckRequest.Marshal(b, m, deterministic) } -func (m *LivenessProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LivenessProbeResponse.Merge(m, src) +func (m *NodeHealthCheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeHealthCheckRequest.Merge(m, src) } -func (m *LivenessProbeResponse) XXX_Size() int { - return xxx_messageInfo_LivenessProbeResponse.Size(m) +func (m *NodeHealthCheckRequest) XXX_Size() int { + return xxx_messageInfo_NodeHealthCheckRequest.Size(m) } -func (m *LivenessProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LivenessProbeResponse.DiscardUnknown(m) +func (m *NodeHealthCheckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeHealthCheckRequest.DiscardUnknown(m) } -var xxx_messageInfo_LivenessProbeResponse proto.InternalMessageInfo +var xxx_messageInfo_NodeHealthCheckRequest proto.InternalMessageInfo -func (m *LivenessProbeResponse) GetState() LivenessProbeResponse_State { +func (m *NodeHealthCheckRequest) GetProbe() NodeHealthCheckRequest_Probe { if m != nil { - return m.State + return m.Probe } - return LivenessProbeResponse_UNKNOWN + return NodeHealthCheckRequest_HEALTHINESS } -// use for health check -type ReadinessProbeResponse struct { - State ReadinessProbeResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=index.ReadinessProbeResponse_State" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type NodeHealthCheckResponse struct { + State NodeHealthCheckResponse_State `protobuf:"varint,1,opt,name=state,proto3,enum=index.NodeHealthCheckResponse_State" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ReadinessProbeResponse) Reset() { *m = ReadinessProbeResponse{} } -func (m *ReadinessProbeResponse) String() string { return proto.CompactTextString(m) } -func (*ReadinessProbeResponse) ProtoMessage() {} -func (*ReadinessProbeResponse) Descriptor() ([]byte, []int) { +func (m *NodeHealthCheckResponse) Reset() { *m = NodeHealthCheckResponse{} } +func (m *NodeHealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*NodeHealthCheckResponse) ProtoMessage() {} +func (*NodeHealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{1} } -func (m *ReadinessProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReadinessProbeResponse.Unmarshal(m, b) +func (m *NodeHealthCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeHealthCheckResponse.Unmarshal(m, b) } -func (m *ReadinessProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReadinessProbeResponse.Marshal(b, m, deterministic) +func (m *NodeHealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeHealthCheckResponse.Marshal(b, m, deterministic) } -func (m *ReadinessProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadinessProbeResponse.Merge(m, src) +func (m *NodeHealthCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeHealthCheckResponse.Merge(m, src) } -func (m *ReadinessProbeResponse) XXX_Size() int { - return xxx_messageInfo_ReadinessProbeResponse.Size(m) +func (m *NodeHealthCheckResponse) XXX_Size() int { + return 
xxx_messageInfo_NodeHealthCheckResponse.Size(m) } -func (m *ReadinessProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadinessProbeResponse.DiscardUnknown(m) +func (m *NodeHealthCheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeHealthCheckResponse.DiscardUnknown(m) } -var xxx_messageInfo_ReadinessProbeResponse proto.InternalMessageInfo +var xxx_messageInfo_NodeHealthCheckResponse proto.InternalMessageInfo -func (m *ReadinessProbeResponse) GetState() ReadinessProbeResponse_State { +func (m *NodeHealthCheckResponse) GetState() NodeHealthCheckResponse_State { if m != nil { return m.State } - return ReadinessProbeResponse_UNKNOWN + return NodeHealthCheckResponse_HEALTHY } -// use for raft cluster status -type GetNodeRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +type Metadata struct { + GrpcAddress string `protobuf:"bytes,1,opt,name=grpc_address,json=grpcAddress,proto3" json:"grpc_address,omitempty"` + HttpAddress string `protobuf:"bytes,2,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetNodeRequest) Reset() { *m = GetNodeRequest{} } -func (m *GetNodeRequest) String() string { return proto.CompactTextString(m) } -func (*GetNodeRequest) ProtoMessage() {} -func (*GetNodeRequest) Descriptor() ([]byte, []int) { +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{2} } -func (m *GetNodeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNodeRequest.Unmarshal(m, b) +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) } -func (m *GetNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNodeRequest.Marshal(b, m, deterministic) +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) } -func (m *GetNodeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNodeRequest.Merge(m, src) +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) } -func (m *GetNodeRequest) XXX_Size() int { - return xxx_messageInfo_GetNodeRequest.Size(m) +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) } -func (m *GetNodeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetNodeRequest.DiscardUnknown(m) +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) } -var xxx_messageInfo_GetNodeRequest proto.InternalMessageInfo +var xxx_messageInfo_Metadata proto.InternalMessageInfo -func (m *GetNodeRequest) GetId() string { +func (m *Metadata) GetGrpcAddress() string { if m != nil { - return m.Id + return m.GrpcAddress } return "" } -// use for raft cluster status -type GetNodeResponse struct { - NodeConfig *any.Any `protobuf:"bytes,1,opt,name=nodeConfig,proto3" json:"nodeConfig,omitempty"` - State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *Metadata) GetHttpAddress() string { + if m != nil { + return m.HttpAddress + } + return "" +} + +type Node struct { + Id string 
`protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + BindAddress string `protobuf:"bytes,2,opt,name=bind_address,json=bindAddress,proto3" json:"bind_address,omitempty"` + State Node_State `protobuf:"varint,3,opt,name=state,proto3,enum=index.Node_State" json:"state,omitempty"` + Metadata *Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GetNodeResponse) Reset() { *m = GetNodeResponse{} } -func (m *GetNodeResponse) String() string { return proto.CompactTextString(m) } -func (*GetNodeResponse) ProtoMessage() {} -func (*GetNodeResponse) Descriptor() ([]byte, []int) { +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptor_7b2daf652facb3ae, []int{3} } -func (m *GetNodeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNodeResponse.Unmarshal(m, b) +func (m *Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Node.Unmarshal(m, b) } -func (m *GetNodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNodeResponse.Marshal(b, m, deterministic) +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Node.Marshal(b, m, deterministic) } -func (m *GetNodeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNodeResponse.Merge(m, src) +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) } -func (m *GetNodeResponse) XXX_Size() int { - return xxx_messageInfo_GetNodeResponse.Size(m) +func (m *Node) XXX_Size() int { + return xxx_messageInfo_Node.Size(m) } -func (m *GetNodeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetNodeResponse.DiscardUnknown(m) +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) } -var xxx_messageInfo_GetNodeResponse proto.InternalMessageInfo +var xxx_messageInfo_Node proto.InternalMessageInfo -func (m *GetNodeResponse) GetNodeConfig() *any.Any { +func (m *Node) GetId() string { if m != nil { - return m.NodeConfig + return m.Id } - return nil + return "" } -func (m *GetNodeResponse) GetState() string { +func (m *Node) GetBindAddress() string { if m != nil { - return m.State + return m.BindAddress } return "" } -// use for raft cluster status -type SetNodeRequest struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - NodeConfig *any.Any `protobuf:"bytes,2,opt,name=nodeConfig,proto3" json:"nodeConfig,omitempty"` +func (m *Node) GetState() Node_State { + if m != nil { + return m.State + } + return Node_UNKNOWN +} + +func (m *Node) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +type Cluster struct { + Nodes map[string]*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{4} +} + +func (m *Cluster) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_Cluster.Unmarshal(m, b) +} +func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cluster.Marshal(b, m, deterministic) +} +func (m *Cluster) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cluster.Merge(m, src) +} +func (m *Cluster) XXX_Size() int { + return xxx_messageInfo_Cluster.Size(m) +} +func (m *Cluster) XXX_DiscardUnknown() { + xxx_messageInfo_Cluster.DiscardUnknown(m) +} + +var xxx_messageInfo_Cluster proto.InternalMessageInfo + +func (m *Cluster) GetNodes() map[string]*Node { + if m != nil { + return m.Nodes + } + return nil +} + +type NodeInfoResponse struct { + Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *SetNodeRequest) Reset() { *m = SetNodeRequest{} } -func (m *SetNodeRequest) String() string { return proto.CompactTextString(m) } -func (*SetNodeRequest) ProtoMessage() {} -func (*SetNodeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{4} +func (m *NodeInfoResponse) Reset() { *m = NodeInfoResponse{} } +func (m *NodeInfoResponse) String() string { return proto.CompactTextString(m) } +func (*NodeInfoResponse) ProtoMessage() {} +func (*NodeInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{5} } -func (m *SetNodeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetNodeRequest.Unmarshal(m, b) +func (m *NodeInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeInfoResponse.Unmarshal(m, b) } -func (m *SetNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetNodeRequest.Marshal(b, m, deterministic) +func (m *NodeInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeInfoResponse.Marshal(b, m, deterministic) } -func (m *SetNodeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetNodeRequest.Merge(m, src) +func (m *NodeInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeInfoResponse.Merge(m, src) } -func (m *SetNodeRequest) XXX_Size() int { - return xxx_messageInfo_SetNodeRequest.Size(m) +func (m *NodeInfoResponse) XXX_Size() int { + return xxx_messageInfo_NodeInfoResponse.Size(m) } -func (m *SetNodeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetNodeRequest.DiscardUnknown(m) +func (m *NodeInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_SetNodeRequest proto.InternalMessageInfo +var xxx_messageInfo_NodeInfoResponse proto.InternalMessageInfo -func (m *SetNodeRequest) GetId() string { +func (m *NodeInfoResponse) GetNode() *Node { if m != nil { - return m.Id + return m.Node } - return "" + return nil } -func (m *SetNodeRequest) GetNodeConfig() *any.Any { +type ClusterJoinRequest struct { + Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterJoinRequest) Reset() { *m = ClusterJoinRequest{} } +func (m *ClusterJoinRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterJoinRequest) ProtoMessage() {} +func (*ClusterJoinRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{6} +} + +func (m *ClusterJoinRequest) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_ClusterJoinRequest.Unmarshal(m, b) +} +func (m *ClusterJoinRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterJoinRequest.Marshal(b, m, deterministic) +} +func (m *ClusterJoinRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterJoinRequest.Merge(m, src) +} +func (m *ClusterJoinRequest) XXX_Size() int { + return xxx_messageInfo_ClusterJoinRequest.Size(m) +} +func (m *ClusterJoinRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterJoinRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterJoinRequest proto.InternalMessageInfo + +func (m *ClusterJoinRequest) GetNode() *Node { if m != nil { - return m.NodeConfig + return m.Node } return nil } -// use for raft cluster status -type DeleteNodeRequest struct { +type ClusterLeaveRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *DeleteNodeRequest) Reset() { *m = DeleteNodeRequest{} } -func (m *DeleteNodeRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteNodeRequest) ProtoMessage() {} -func (*DeleteNodeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{5} +func (m *ClusterLeaveRequest) Reset() { *m = ClusterLeaveRequest{} } +func (m *ClusterLeaveRequest) String() string { return proto.CompactTextString(m) } +func (*ClusterLeaveRequest) ProtoMessage() {} +func (*ClusterLeaveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{7} } -func (m *DeleteNodeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteNodeRequest.Unmarshal(m, b) +func (m *ClusterLeaveRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterLeaveRequest.Unmarshal(m, b) } -func (m *DeleteNodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteNodeRequest.Marshal(b, m, deterministic) +func (m *ClusterLeaveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterLeaveRequest.Marshal(b, m, deterministic) } -func (m *DeleteNodeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteNodeRequest.Merge(m, src) +func (m *ClusterLeaveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterLeaveRequest.Merge(m, src) } -func (m *DeleteNodeRequest) XXX_Size() int { - return xxx_messageInfo_DeleteNodeRequest.Size(m) +func (m *ClusterLeaveRequest) XXX_Size() int { + return xxx_messageInfo_ClusterLeaveRequest.Size(m) } -func (m *DeleteNodeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteNodeRequest.DiscardUnknown(m) +func (m *ClusterLeaveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterLeaveRequest.DiscardUnknown(m) } -var xxx_messageInfo_DeleteNodeRequest proto.InternalMessageInfo +var xxx_messageInfo_ClusterLeaveRequest proto.InternalMessageInfo -func (m *DeleteNodeRequest) GetId() string { +func (m *ClusterLeaveRequest) GetId() string { if m != nil { return m.Id } return "" } -// use for raft cluster status -type GetClusterResponse struct { - Cluster *any.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` +type ClusterInfoResponse struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *GetClusterResponse) Reset() { *m = 
GetClusterResponse{} } -func (m *GetClusterResponse) String() string { return proto.CompactTextString(m) } -func (*GetClusterResponse) ProtoMessage() {} -func (*GetClusterResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{6} +func (m *ClusterInfoResponse) Reset() { *m = ClusterInfoResponse{} } +func (m *ClusterInfoResponse) String() string { return proto.CompactTextString(m) } +func (*ClusterInfoResponse) ProtoMessage() {} +func (*ClusterInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{8} } -func (m *GetClusterResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetClusterResponse.Unmarshal(m, b) +func (m *ClusterInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterInfoResponse.Unmarshal(m, b) } -func (m *GetClusterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetClusterResponse.Marshal(b, m, deterministic) +func (m *ClusterInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterInfoResponse.Marshal(b, m, deterministic) } -func (m *GetClusterResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetClusterResponse.Merge(m, src) +func (m *ClusterInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterInfoResponse.Merge(m, src) } -func (m *GetClusterResponse) XXX_Size() int { - return xxx_messageInfo_GetClusterResponse.Size(m) +func (m *ClusterInfoResponse) XXX_Size() int { + return xxx_messageInfo_ClusterInfoResponse.Size(m) } -func (m *GetClusterResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetClusterResponse.DiscardUnknown(m) +func (m *ClusterInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterInfoResponse.DiscardUnknown(m) } -var xxx_messageInfo_GetClusterResponse proto.InternalMessageInfo +var xxx_messageInfo_ClusterInfoResponse proto.InternalMessageInfo -func (m *GetClusterResponse) GetCluster() *any.Any { +func (m *ClusterInfoResponse) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +type ClusterWatchResponse struct { + Event ClusterWatchResponse_Event `protobuf:"varint,1,opt,name=event,proto3,enum=index.ClusterWatchResponse_Event" json:"event,omitempty"` + Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"` + Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClusterWatchResponse) Reset() { *m = ClusterWatchResponse{} } +func (m *ClusterWatchResponse) String() string { return proto.CompactTextString(m) } +func (*ClusterWatchResponse) ProtoMessage() {} +func (*ClusterWatchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7b2daf652facb3ae, []int{9} +} + +func (m *ClusterWatchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClusterWatchResponse.Unmarshal(m, b) +} +func (m *ClusterWatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClusterWatchResponse.Marshal(b, m, deterministic) +} +func (m *ClusterWatchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterWatchResponse.Merge(m, src) +} +func (m *ClusterWatchResponse) XXX_Size() int { + return xxx_messageInfo_ClusterWatchResponse.Size(m) +} +func (m *ClusterWatchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterWatchResponse.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ClusterWatchResponse proto.InternalMessageInfo + +func (m *ClusterWatchResponse) GetEvent() ClusterWatchResponse_Event { + if m != nil { + return m.Event + } + return ClusterWatchResponse_UNKNOWN +} + +func (m *ClusterWatchResponse) GetNode() *Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *ClusterWatchResponse) GetCluster() *Cluster { if m != nil { return m.Cluster } @@ -387,7 +603,7 @@ func (m *GetDocumentRequest) Reset() { *m = GetDocumentRequest{} } func (m *GetDocumentRequest) String() string { return proto.CompactTextString(m) } func (*GetDocumentRequest) ProtoMessage() {} func (*GetDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{7} + return fileDescriptor_7b2daf652facb3ae, []int{10} } func (m *GetDocumentRequest) XXX_Unmarshal(b []byte) error { @@ -426,7 +642,7 @@ func (m *GetDocumentResponse) Reset() { *m = GetDocumentResponse{} } func (m *GetDocumentResponse) String() string { return proto.CompactTextString(m) } func (*GetDocumentResponse) ProtoMessage() {} func (*GetDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{8} + return fileDescriptor_7b2daf652facb3ae, []int{11} } func (m *GetDocumentResponse) XXX_Unmarshal(b []byte) error { @@ -466,7 +682,7 @@ func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} } func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) } func (*IndexDocumentRequest) ProtoMessage() {} func (*IndexDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{9} + return fileDescriptor_7b2daf652facb3ae, []int{12} } func (m *IndexDocumentRequest) XXX_Unmarshal(b []byte) error { @@ -512,7 +728,7 @@ func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} } func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) } func (*IndexDocumentResponse) ProtoMessage() {} func (*IndexDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{10} + return fileDescriptor_7b2daf652facb3ae, []int{13} } func (m *IndexDocumentResponse) XXX_Unmarshal(b []byte) error { @@ -551,7 +767,7 @@ func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} } func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) } func (*DeleteDocumentRequest) ProtoMessage() {} func (*DeleteDocumentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{11} + return fileDescriptor_7b2daf652facb3ae, []int{14} } func (m *DeleteDocumentRequest) XXX_Unmarshal(b []byte) error { @@ -590,7 +806,7 @@ func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) } func (*DeleteDocumentResponse) ProtoMessage() {} func (*DeleteDocumentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{12} + return fileDescriptor_7b2daf652facb3ae, []int{15} } func (m *DeleteDocumentResponse) XXX_Unmarshal(b []byte) error { @@ -629,7 +845,7 @@ func (m *SearchRequest) Reset() { *m = SearchRequest{} } func (m *SearchRequest) String() string { return proto.CompactTextString(m) } func (*SearchRequest) ProtoMessage() {} func (*SearchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{13} + return fileDescriptor_7b2daf652facb3ae, []int{16} } func (m *SearchRequest) XXX_Unmarshal(b []byte) error { @@ -668,7 +884,7 @@ 
func (m *SearchResponse) Reset() { *m = SearchResponse{} } func (m *SearchResponse) String() string { return proto.CompactTextString(m) } func (*SearchResponse) ProtoMessage() {} func (*SearchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{14} + return fileDescriptor_7b2daf652facb3ae, []int{17} } func (m *SearchResponse) XXX_Unmarshal(b []byte) error { @@ -707,7 +923,7 @@ func (m *GetIndexConfigResponse) Reset() { *m = GetIndexConfigResponse{} func (m *GetIndexConfigResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexConfigResponse) ProtoMessage() {} func (*GetIndexConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{15} + return fileDescriptor_7b2daf652facb3ae, []int{18} } func (m *GetIndexConfigResponse) XXX_Unmarshal(b []byte) error { @@ -746,7 +962,7 @@ func (m *GetIndexStatsResponse) Reset() { *m = GetIndexStatsResponse{} } func (m *GetIndexStatsResponse) String() string { return proto.CompactTextString(m) } func (*GetIndexStatsResponse) ProtoMessage() {} func (*GetIndexStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{16} + return fileDescriptor_7b2daf652facb3ae, []int{19} } func (m *GetIndexStatsResponse) XXX_Unmarshal(b []byte) error { @@ -787,7 +1003,7 @@ func (m *Document) Reset() { *m = Document{} } func (m *Document) String() string { return proto.CompactTextString(m) } func (*Document) ProtoMessage() {} func (*Document) Descriptor() ([]byte, []int) { - return fileDescriptor_7b2daf652facb3ae, []int{17} + return fileDescriptor_7b2daf652facb3ae, []int{20} } func (m *Document) XXX_Unmarshal(b []byte) error { @@ -823,15 +1039,21 @@ func (m *Document) GetFields() *any.Any { } func init() { - proto.RegisterEnum("index.LivenessProbeResponse_State", LivenessProbeResponse_State_name, LivenessProbeResponse_State_value) - proto.RegisterEnum("index.ReadinessProbeResponse_State", ReadinessProbeResponse_State_name, ReadinessProbeResponse_State_value) - proto.RegisterType((*LivenessProbeResponse)(nil), "index.LivenessProbeResponse") - proto.RegisterType((*ReadinessProbeResponse)(nil), "index.ReadinessProbeResponse") - proto.RegisterType((*GetNodeRequest)(nil), "index.GetNodeRequest") - proto.RegisterType((*GetNodeResponse)(nil), "index.GetNodeResponse") - proto.RegisterType((*SetNodeRequest)(nil), "index.SetNodeRequest") - proto.RegisterType((*DeleteNodeRequest)(nil), "index.DeleteNodeRequest") - proto.RegisterType((*GetClusterResponse)(nil), "index.GetClusterResponse") + proto.RegisterEnum("index.NodeHealthCheckRequest_Probe", NodeHealthCheckRequest_Probe_name, NodeHealthCheckRequest_Probe_value) + proto.RegisterEnum("index.NodeHealthCheckResponse_State", NodeHealthCheckResponse_State_name, NodeHealthCheckResponse_State_value) + proto.RegisterEnum("index.Node_State", Node_State_name, Node_State_value) + proto.RegisterEnum("index.ClusterWatchResponse_Event", ClusterWatchResponse_Event_name, ClusterWatchResponse_Event_value) + proto.RegisterType((*NodeHealthCheckRequest)(nil), "index.NodeHealthCheckRequest") + proto.RegisterType((*NodeHealthCheckResponse)(nil), "index.NodeHealthCheckResponse") + proto.RegisterType((*Metadata)(nil), "index.Metadata") + proto.RegisterType((*Node)(nil), "index.Node") + proto.RegisterType((*Cluster)(nil), "index.Cluster") + proto.RegisterMapType((map[string]*Node)(nil), "index.Cluster.NodesEntry") + proto.RegisterType((*NodeInfoResponse)(nil), "index.NodeInfoResponse") + 
proto.RegisterType((*ClusterJoinRequest)(nil), "index.ClusterJoinRequest") + proto.RegisterType((*ClusterLeaveRequest)(nil), "index.ClusterLeaveRequest") + proto.RegisterType((*ClusterInfoResponse)(nil), "index.ClusterInfoResponse") + proto.RegisterType((*ClusterWatchResponse)(nil), "index.ClusterWatchResponse") proto.RegisterType((*GetDocumentRequest)(nil), "index.GetDocumentRequest") proto.RegisterType((*GetDocumentResponse)(nil), "index.GetDocumentResponse") proto.RegisterType((*IndexDocumentRequest)(nil), "index.IndexDocumentRequest") @@ -848,55 +1070,74 @@ func init() { func init() { proto.RegisterFile("protobuf/index/index.proto", fileDescriptor_7b2daf652facb3ae) } var fileDescriptor_7b2daf652facb3ae = []byte{ - // 755 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xef, 0x6f, 0xd2, 0x50, - 0x14, 0x2d, 0x44, 0xc6, 0x76, 0x19, 0x88, 0x4f, 0x20, 0x5b, 0xb7, 0x25, 0xcb, 0x9b, 0xd1, 0x99, - 0x68, 0x31, 0x53, 0x33, 0x7f, 0x7d, 0x10, 0x07, 0xe2, 0xb2, 0x85, 0xb9, 0x32, 0xb7, 0x68, 0x62, - 0x48, 0xa1, 0x6f, 0xd0, 0x08, 0x7d, 0xc8, 0x7b, 0x35, 0x2e, 0xf1, 0x9b, 0x7f, 0xa3, 0xff, 0x8f, - 0x69, 0x5f, 0x5b, 0xda, 0xae, 0x2d, 0x4b, 0xf6, 0x85, 0xe4, 0xdd, 0x77, 0xee, 0x39, 0xf7, 0x5d, - 0xee, 0x3d, 0x29, 0xc8, 0xd3, 0x19, 0xe5, 0xb4, 0x6f, 0x5d, 0xd6, 0x0d, 0x53, 0x27, 0xbf, 0xc5, - 0xaf, 0xe2, 0x04, 0x51, 0xce, 0x39, 0xc8, 0xeb, 0x43, 0x4a, 0x87, 0x63, 0x52, 0xf7, 0x91, 0x9a, - 0x79, 0x25, 0x10, 0xf2, 0x46, 0xf4, 0x8a, 0x4c, 0xa6, 0xdc, 0xbd, 0xc4, 0x7f, 0xa0, 0x7a, 0x6c, - 0xfc, 0x22, 0x26, 0x61, 0xec, 0xf3, 0x8c, 0xf6, 0x89, 0x4a, 0xd8, 0x94, 0x9a, 0x8c, 0xa0, 0x57, - 0x90, 0x63, 0x5c, 0xe3, 0x64, 0x2d, 0xb3, 0x9d, 0xd9, 0x2d, 0xed, 0x61, 0x45, 0x88, 0xc6, 0x82, - 0x95, 0xae, 0x8d, 0x54, 0x45, 0x02, 0x7e, 0x0c, 0x39, 0xe7, 0x8c, 0x0a, 0x90, 0xff, 0xd2, 0x39, - 0xea, 0x9c, 0x5c, 0x74, 0xca, 0x12, 0x5a, 0x81, 0x5c, 0xe3, 0xf8, 0xf0, 0xbc, 0x55, 0xce, 0xa0, - 0x65, 0xb8, 0xd3, 0x6c, 0x35, 0x9a, 0xe5, 0x2c, 0xfe, 0x9b, 0x81, 0x9a, 0x4a, 0x34, 0xdd, 0xb8, - 0xae, 0xff, 0x3a, 0xac, 0xbf, 0xe3, 0xea, 0xc7, 0xa3, 0xc3, 0x05, 0x28, 0x49, 0x05, 0xa8, 0xad, - 0x46, 0xf3, 0x6b, 0x39, 0x83, 0x8a, 0xb0, 0xd2, 0x39, 0x39, 0xeb, 0x89, 0x63, 0x16, 0x6f, 0x43, - 0xa9, 0x4d, 0x78, 0x87, 0xea, 0x44, 0x25, 0x3f, 0x2d, 0xc2, 0x38, 0x2a, 0x41, 0xd6, 0xd0, 0x1d, - 0xe5, 0x15, 0x35, 0x6b, 0xe8, 0xf8, 0x3b, 0xdc, 0xf5, 0x11, 0x6e, 0x7d, 0x2f, 0x00, 0x4c, 0xaa, - 0x93, 0x03, 0x6a, 0x5e, 0x1a, 0x43, 0x07, 0x5a, 0xd8, 0xab, 0x28, 0xa2, 0xd5, 0x8a, 0xd7, 0x6a, - 0xa5, 0x61, 0x5e, 0xa9, 0x01, 0x1c, 0xaa, 0x78, 0xaf, 0xca, 0x3a, 0xdc, 0x6e, 0xc1, 0xe7, 0x50, - 0xea, 0xa6, 0x16, 0x10, 0x51, 0xcb, 0xde, 0x4c, 0x0d, 0xef, 0xc0, 0xbd, 0x26, 0x19, 0x13, 0x4e, - 0xd2, 0xde, 0xd6, 0x04, 0xd4, 0x26, 0xfc, 0x60, 0x6c, 0x31, 0x4e, 0x66, 0xfe, 0xf3, 0x14, 0xc8, - 0x0f, 0x44, 0x28, 0xf5, 0x6d, 0x1e, 0x08, 0x3f, 0x70, 0x58, 0x9a, 0x74, 0x60, 0x4d, 0x88, 0xc9, - 0x93, 0xb4, 0x0e, 0xe0, 0x7e, 0x08, 0xe5, 0x8a, 0x3d, 0x81, 0xa5, 0x4b, 0x83, 0x8c, 0x75, 0x96, - 0xaa, 0xe5, 0x62, 0xf0, 0x19, 0x54, 0x0e, 0xed, 0x59, 0x58, 0x20, 0x16, 0x60, 0xcd, 0xde, 0x80, - 0xf5, 0x29, 0x54, 0x23, 0xac, 0x6e, 0x71, 0x15, 0xc8, 0x0d, 0xa8, 0x65, 0x72, 0x87, 0x39, 0xa7, - 0x8a, 0x03, 0x7e, 0x04, 0x55, 0xd1, 0xda, 0x45, 0x4f, 0x56, 0xa0, 0x16, 0x05, 0xa6, 0x12, 0x1f, - 0x43, 0xb1, 0x4b, 0xb4, 0xd9, 0x60, 0xe4, 0x11, 0xbe, 0x85, 0x12, 0x73, 0x02, 0xbd, 0x99, 0x88, - 0xa4, 0x36, 0xa9, 0xc8, 0x82, 0xc9, 0xf8, 0xc8, 0x9e, 0x2c, 0x11, 0xf0, 0xf7, 0xaa, 0xe8, 0xd3, - 0x31, 0x6b, 0x9c, 0xce, 0xb6, 
0xea, 0xb1, 0xd9, 0x48, 0x7c, 0x0a, 0xb5, 0x36, 0xe1, 0x4e, 0x97, - 0xc4, 0x80, 0xf9, 0xa4, 0xfb, 0xb0, 0xea, 0xac, 0x67, 0x6f, 0xb0, 0x78, 0x1d, 0x0a, 0xc6, 0x9c, - 0x00, 0x77, 0xa0, 0xea, 0x51, 0xda, 0x2b, 0xcb, 0x7c, 0xc6, 0x97, 0x20, 0x70, 0x3d, 0x7b, 0x43, - 0xd2, 0xe7, 0x02, 0x0c, 0x3f, 0x1d, 0x7f, 0x82, 0x65, 0xaf, 0xcf, 0xb7, 0x9b, 0x87, 0xbd, 0x7f, - 0x79, 0xc8, 0x39, 0x75, 0xa1, 0x36, 0x14, 0x43, 0xae, 0x87, 0x6a, 0xd7, 0x12, 0x5b, 0xb6, 0xa3, - 0xca, 0x9b, 0x69, 0x1e, 0x89, 0x25, 0x74, 0x08, 0xa5, 0xb0, 0x7d, 0x25, 0x32, 0x6d, 0xa5, 0xba, - 0x1d, 0x96, 0xd0, 0x1b, 0xc8, 0xbb, 0x86, 0x84, 0xaa, 0x2e, 0x36, 0x6c, 0x61, 0x72, 0x2d, 0x1a, - 0x0e, 0xe6, 0x76, 0x23, 0xb9, 0xdd, 0x68, 0x6e, 0x6c, 0x59, 0x58, 0x42, 0xef, 0x01, 0xe6, 0x8e, - 0x82, 0xd6, 0xdc, 0xf4, 0x6b, 0x26, 0x93, 0xc2, 0xd0, 0x00, 0x98, 0xdb, 0x4d, 0x62, 0x03, 0xd6, - 0xe7, 0xd5, 0x47, 0x9c, 0x09, 0x4b, 0xa8, 0x05, 0xab, 0x17, 0x1a, 0x1f, 0x8c, 0x6e, 0x43, 0xf2, - 0x2c, 0x83, 0x3e, 0x42, 0x21, 0x60, 0x46, 0x28, 0x80, 0x8e, 0xec, 0xb4, 0x2c, 0xc7, 0x5d, 0xf9, - 0xe5, 0x74, 0xa0, 0x18, 0x72, 0x0e, 0xb4, 0xe1, 0xc2, 0xe3, 0x5c, 0xca, 0x1f, 0x92, 0x58, 0xb3, - 0xc1, 0xd2, 0x6e, 0x06, 0x9d, 0x42, 0x29, 0xec, 0x18, 0x68, 0x33, 0xd4, 0xe7, 0x28, 0xe3, 0x56, - 0xc2, 0x6d, 0x80, 0x72, 0x1f, 0x96, 0x84, 0x0d, 0xa0, 0x8a, 0xff, 0x8f, 0x07, 0x6c, 0x42, 0xae, - 0x46, 0xa2, 0xc1, 0x91, 0x0d, 0xaf, 0xfc, 0xc2, 0x91, 0x8d, 0x77, 0x08, 0x2c, 0xd9, 0x6b, 0x14, - 0x5a, 0xf5, 0x85, 0x6b, 0x14, 0x6b, 0x0c, 0x58, 0x42, 0xef, 0x60, 0xb9, 0x6b, 0x6a, 0x53, 0x36, - 0xa2, 0x3c, 0x91, 0x23, 0x71, 0xfe, 0x3e, 0xec, 0x7e, 0x7b, 0x38, 0x34, 0xf8, 0xc8, 0xea, 0x2b, - 0x03, 0x3a, 0xa9, 0x4f, 0x28, 0xb3, 0x7e, 0x68, 0xf5, 0xfe, 0x58, 0x63, 0xbc, 0x1e, 0xfe, 0xca, - 0xea, 0x2f, 0x39, 0xe7, 0xe7, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x23, 0xe9, 0x66, 0x7e, - 0x09, 0x00, 0x00, + // 1067 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x5d, 0x53, 0xdb, 0x46, + 0x14, 0xb5, 0x6c, 0x0b, 0x9c, 0x6b, 0x6c, 0xdc, 0x0d, 0x90, 0x44, 0x09, 0x6d, 0xd8, 0xa6, 0x8d, + 0x67, 0xda, 0xda, 0x1d, 0x32, 0x4c, 0x9a, 0xb4, 0x9d, 0x8e, 0x63, 0x29, 0xe0, 0xe0, 0x0a, 0x22, + 0x43, 0x98, 0xf4, 0x85, 0x91, 0xad, 0x05, 0x6b, 0x30, 0x92, 0x6b, 0xad, 0x99, 0xf2, 0xd8, 0xd7, + 0xfe, 0x92, 0xf6, 0xe7, 0xf4, 0xbd, 0x3f, 0xa6, 0xb3, 0x1f, 0x12, 0x92, 0xb0, 0x44, 0x67, 0xfa, + 0xc2, 0xb0, 0x77, 0xcf, 0x3d, 0x7b, 0xee, 0xdd, 0xbb, 0x47, 0x06, 0x6d, 0x3a, 0xf3, 0xa9, 0x3f, + 0x9c, 0x9f, 0xb5, 0x5d, 0xcf, 0x21, 0xbf, 0x89, 0xbf, 0x2d, 0x1e, 0x44, 0x2a, 0x5f, 0x68, 0x8f, + 0xce, 0x7d, 0xff, 0x7c, 0x42, 0xda, 0x11, 0xd2, 0xf6, 0xae, 0x05, 0x42, 0x7b, 0x9c, 0xde, 0x22, + 0x97, 0x53, 0x2a, 0x37, 0xf1, 0x1f, 0x0a, 0x6c, 0x98, 0xbe, 0x43, 0xf6, 0x88, 0x3d, 0xa1, 0xe3, + 0xee, 0x98, 0x8c, 0x2e, 0x2c, 0xf2, 0xeb, 0x9c, 0x04, 0x14, 0xbd, 0x02, 0x75, 0x3a, 0xf3, 0x87, + 0xe4, 0xa1, 0xf2, 0x54, 0x69, 0xd6, 0xb7, 0x3f, 0x6f, 0x89, 0x63, 0x17, 0xa3, 0x5b, 0x87, 0x0c, + 0x6a, 0x89, 0x0c, 0xbc, 0x03, 0x2a, 0x5f, 0xa3, 0x55, 0xa8, 0xee, 0x19, 0x9d, 0xfe, 0xd1, 0x5e, + 0xcf, 0x34, 0x06, 0x83, 0x46, 0x01, 0xad, 0x40, 0xa5, 0xdf, 0xfb, 0x60, 0xf0, 0x95, 0x82, 0x6a, + 0x70, 0xcf, 0x32, 0x3a, 0xba, 0xd8, 0x2c, 0xe2, 0xbf, 0x14, 0x78, 0x70, 0x8b, 0x3e, 0x98, 0xfa, + 0x5e, 0x40, 0xd0, 0x6b, 0x50, 0x03, 0x6a, 0xd3, 0x50, 0xcd, 0xb3, 0x2c, 0x35, 0x02, 0xde, 0x1a, + 0x30, 0xac, 0x25, 0x52, 0xb0, 0x05, 0x2a, 0x5f, 0xa3, 0x2a, 0x2c, 0x0b, 0x39, 0x1f, 0x1b, 0x05, + 0x76, 0xf8, 0xb1, 0x19, 0x2e, 0x15, 0x74, 0x0f, 0xd4, 0x0e, 0x93, 0xd6, 0x28, 0xa2, 
0x0a, 0x94, + 0x75, 0xa3, 0xa3, 0x37, 0x4a, 0x2c, 0xc8, 0x04, 0x7e, 0x6c, 0x94, 0x19, 0xdc, 0x3c, 0x38, 0x3a, + 0x15, 0x4b, 0x15, 0x1f, 0x42, 0xe5, 0x67, 0x42, 0x6d, 0xc7, 0xa6, 0x36, 0xda, 0x82, 0x95, 0xf3, + 0xd9, 0x74, 0x74, 0x6a, 0x3b, 0xce, 0x8c, 0x04, 0x01, 0x97, 0x78, 0xcf, 0xaa, 0xb2, 0x58, 0x47, + 0x84, 0x18, 0x64, 0x4c, 0xe9, 0x34, 0x82, 0x14, 0x05, 0x84, 0xc5, 0x24, 0x04, 0xff, 0xa3, 0x40, + 0x99, 0x95, 0x83, 0xea, 0x50, 0x74, 0x1d, 0x49, 0x52, 0x74, 0x1d, 0x96, 0x3b, 0x74, 0x3d, 0x27, + 0x9d, 0xcb, 0x62, 0x21, 0xfd, 0xf3, 0xb0, 0x3b, 0x25, 0xde, 0x9d, 0x4f, 0x62, 0xdd, 0x49, 0xb4, + 0x02, 0x7d, 0x05, 0x95, 0x4b, 0x29, 0xfb, 0x61, 0xf9, 0xa9, 0xd2, 0xac, 0x6e, 0xaf, 0x4a, 0x6c, + 0x58, 0x8d, 0x15, 0x01, 0xf0, 0x7e, 0xac, 0x6f, 0xc7, 0xe6, 0xbe, 0x79, 0x70, 0x62, 0x8a, 0x2b, + 0x7c, 0x7b, 0xd0, 0xef, 0x1f, 0x9c, 0x18, 0x96, 0xb8, 0xc2, 0x6e, 0xc7, 0xd4, 0x7b, 0x7a, 0xe7, + 0x88, 0xb5, 0x0e, 0x60, 0xa9, 0x6f, 0x74, 0x74, 0xc3, 0x6a, 0x94, 0x18, 0x70, 0xb0, 0x77, 0x7c, + 0xa4, 0xb3, 0xb4, 0x32, 0xfe, 0x5d, 0x81, 0xe5, 0xee, 0x64, 0x1e, 0x50, 0x32, 0x43, 0x6d, 0x50, + 0x3d, 0xdf, 0x21, 0xac, 0x53, 0xa5, 0x66, 0x75, 0xfb, 0x91, 0x94, 0x20, 0xb7, 0xb9, 0xec, 0xc0, + 0xf0, 0xe8, 0xec, 0xda, 0x12, 0x38, 0xcd, 0x00, 0xb8, 0x09, 0xa2, 0x06, 0x94, 0x2e, 0xc8, 0xb5, + 0xec, 0x10, 0xfb, 0x17, 0x6d, 0x81, 0x7a, 0x65, 0x4f, 0xe6, 0x84, 0xf7, 0xa6, 0xba, 0x5d, 0x8d, + 0xd5, 0x6f, 0x89, 0x9d, 0xd7, 0xc5, 0xef, 0x14, 0xfc, 0x02, 0x1a, 0x2c, 0xd4, 0xf3, 0xce, 0xfc, + 0x68, 0xb0, 0x3e, 0x83, 0x32, 0x3b, 0x83, 0xb3, 0xa5, 0x32, 0xf9, 0x06, 0xde, 0x01, 0x24, 0x85, + 0xbd, 0xf3, 0x5d, 0x2f, 0x7c, 0x1d, 0x77, 0xa6, 0x7d, 0x01, 0xf7, 0x65, 0x5a, 0x9f, 0xd8, 0x57, + 0x24, 0xcc, 0x4b, 0x5d, 0x2e, 0xfe, 0x29, 0x82, 0x25, 0x54, 0x35, 0x61, 0x79, 0x24, 0xc2, 0xf2, + 0x84, 0x7a, 0xb2, 0x47, 0x56, 0xb8, 0x8d, 0xff, 0x56, 0x60, 0x4d, 0x06, 0x4f, 0x6c, 0x3a, 0x1a, + 0x47, 0x14, 0x2f, 0x41, 0x25, 0x57, 0xc4, 0xa3, 0xf2, 0xc5, 0x6c, 0x25, 0x09, 0x12, 0xd8, 0x96, + 0xc1, 0x80, 0x96, 0xc0, 0x47, 0xa5, 0x15, 0x33, 0x4a, 0x8b, 0x8b, 0x2b, 0xe5, 0x8b, 0xdb, 0x01, + 0x95, 0x53, 0x27, 0x27, 0xa8, 0x02, 0xe5, 0x77, 0x07, 0x3d, 0x53, 0x3c, 0xba, 0xbe, 0xd1, 0xf9, + 0x20, 0x27, 0xe7, 0xf8, 0x90, 0x4f, 0x51, 0x09, 0x3f, 0x03, 0xb4, 0x4b, 0xa8, 0xee, 0x8f, 0xe6, + 0x97, 0x4c, 0x57, 0x46, 0xeb, 0xba, 0x70, 0x3f, 0x81, 0x92, 0x75, 0x7f, 0x0d, 0x4b, 0x67, 0x2e, + 0x99, 0x38, 0x81, 0xec, 0xdc, 0x5a, 0x4b, 0x18, 0x60, 0x2b, 0x34, 0xc0, 0x56, 0xc7, 0xbb, 0xb6, + 0x24, 0x06, 0x1f, 0xc1, 0x5a, 0x8f, 0x69, 0xbf, 0xe3, 0xb0, 0x18, 0x6b, 0xf1, 0x3f, 0xb0, 0x7e, + 0x03, 0xeb, 0x29, 0x56, 0x29, 0x6e, 0x0d, 0xd4, 0x91, 0x3f, 0x97, 0x97, 0xa2, 0x5a, 0x62, 0x81, + 0x9f, 0xc3, 0xba, 0x4e, 0x26, 0x84, 0x92, 0xbb, 0x4a, 0x6e, 0xc1, 0x46, 0x1a, 0x98, 0x4b, 0xdc, + 0x87, 0xda, 0x80, 0xd8, 0x33, 0x76, 0xd3, 0x82, 0xf0, 0x7b, 0xa8, 0x07, 0x3c, 0x70, 0x3a, 0x13, + 0x91, 0xdc, 0x26, 0xd5, 0x82, 0x78, 0x32, 0xde, 0x87, 0x7a, 0xc8, 0x26, 0x4f, 0x7d, 0x05, 0xb5, + 0x88, 0x2e, 0x98, 0x4f, 0xf2, 0xd9, 0x56, 0x42, 0x36, 0x86, 0xc4, 0xef, 0x61, 0x63, 0x97, 0x50, + 0xde, 0xa5, 0xae, 0xef, 0x9d, 0xb9, 0xe7, 0xb1, 0xc1, 0x5d, 0xe1, 0xe3, 0x74, 0x3a, 0xe2, 0xf1, + 0x5c, 0xce, 0xaa, 0x7b, 0x43, 0x80, 0x4d, 0x58, 0x0f, 0x29, 0x99, 0x6f, 0x05, 0x11, 0xe3, 0x0e, + 0x08, 0xdc, 0x29, 0x33, 0xc1, 0xfc, 0xb9, 0x00, 0x37, 0x4a, 0xc7, 0x7b, 0x50, 0x09, 0xfb, 0xfc, + 0xff, 0xe6, 0x61, 0xfb, 0xcf, 0x65, 0x50, 0xb9, 0x2e, 0x64, 0xc1, 0x6a, 0xea, 0x9b, 0x85, 0x36, + 0x73, 0xbf, 0xac, 0xda, 0xa7, 0xf9, 0x9f, 0x3a, 0x5c, 0x40, 0x3f, 0x42, 0x25, 0xb4, 0x35, 0xb4, + 0x71, 0x4b, 
0x87, 0xc1, 0x3e, 0xf7, 0xda, 0x83, 0x18, 0x4b, 0xdc, 0x69, 0x70, 0x01, 0xbd, 0x81, + 0x6a, 0xcc, 0xe0, 0x50, 0xca, 0x8d, 0x63, 0xa6, 0xa7, 0x65, 0x90, 0xe3, 0x02, 0xd2, 0x61, 0x25, + 0xee, 0x76, 0x48, 0x4b, 0x92, 0xc4, 0x2d, 0x30, 0x87, 0xa5, 0x1b, 0x29, 0xc9, 0xad, 0x25, 0x45, + 0x9e, 0x2a, 0x67, 0x37, 0x92, 0xc2, 0x3d, 0x2e, 0x93, 0xe5, 0x71, 0x8e, 0x21, 0xe2, 0xc2, 0xb7, + 0x0a, 0x7a, 0x0b, 0xd5, 0x98, 0xbf, 0x44, 0x7d, 0xb9, 0xed, 0x4c, 0x91, 0xa0, 0x05, 0x76, 0x84, + 0x0b, 0xc8, 0x84, 0x5a, 0xc2, 0x0c, 0x50, 0x78, 0xf2, 0x22, 0xe3, 0xd1, 0x9e, 0x2c, 0xde, 0x0c, + 0xd9, 0x9a, 0x0a, 0x7a, 0x0f, 0xf5, 0xa4, 0x09, 0xa0, 0x30, 0x67, 0xa1, 0x89, 0x68, 0x9b, 0x19, + 0xbb, 0x31, 0xca, 0x97, 0xb0, 0x24, 0x5e, 0x36, 0x5a, 0x93, 0xe0, 0x84, 0x6d, 0x68, 0xeb, 0xa9, + 0x68, 0x54, 0x5b, 0x0f, 0xea, 0xc9, 0x57, 0x9c, 0xd9, 0xee, 0xcd, 0x9b, 0x1e, 0x2d, 0x78, 0xf4, + 0xfc, 0xde, 0x6a, 0x89, 0xd7, 0x9b, 0xc9, 0xf4, 0x24, 0xc5, 0x94, 0x78, 0xeb, 0xb8, 0x80, 0x7e, + 0x80, 0xca, 0xc0, 0xb3, 0xa7, 0xc1, 0xd8, 0xa7, 0x99, 0x1c, 0x99, 0x33, 0xf8, 0xa6, 0xf9, 0xcb, + 0x97, 0xe7, 0x2e, 0x1d, 0xcf, 0x87, 0xad, 0x91, 0x7f, 0xd9, 0xbe, 0xf4, 0x83, 0xf9, 0x85, 0xdd, + 0x1e, 0x4e, 0xec, 0x80, 0xb6, 0x93, 0x3f, 0xc3, 0x87, 0x4b, 0x7c, 0xfd, 0xe2, 0xdf, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x05, 0xe3, 0xab, 0x2e, 0x9f, 0x0b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -911,13 +1152,12 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type IndexClient interface { - LivenessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LivenessProbeResponse, error) - ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) - GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) - SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) - DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) - GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) - WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_WatchClusterClient, error) + NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) + NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) + ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) + ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) + ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) + ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_ClusterWatchClient, error) GetDocument(ctx context.Context, in *GetDocumentRequest, opts ...grpc.CallOption) (*GetDocumentResponse, error) IndexDocument(ctx context.Context, opts ...grpc.CallOption) (Index_IndexDocumentClient, error) DeleteDocument(ctx context.Context, opts ...grpc.CallOption) (Index_DeleteDocumentClient, error) @@ -935,66 +1175,57 @@ func NewIndexClient(cc *grpc.ClientConn) IndexClient { return &indexClient{cc} } -func (c *indexClient) LivenessProbe(ctx context.Context, in *empty.Empty, opts 
...grpc.CallOption) (*LivenessProbeResponse, error) { - out := new(LivenessProbeResponse) - err := c.cc.Invoke(ctx, "/index.Index/LivenessProbe", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *indexClient) ReadinessProbe(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ReadinessProbeResponse, error) { - out := new(ReadinessProbeResponse) - err := c.cc.Invoke(ctx, "/index.Index/ReadinessProbe", in, out, opts...) +func (c *indexClient) NodeHealthCheck(ctx context.Context, in *NodeHealthCheckRequest, opts ...grpc.CallOption) (*NodeHealthCheckResponse, error) { + out := new(NodeHealthCheckResponse) + err := c.cc.Invoke(ctx, "/index.Index/NodeHealthCheck", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *indexClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) { - out := new(GetNodeResponse) - err := c.cc.Invoke(ctx, "/index.Index/GetNode", in, out, opts...) +func (c *indexClient) NodeInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*NodeInfoResponse, error) { + out := new(NodeInfoResponse) + err := c.cc.Invoke(ctx, "/index.Index/NodeInfo", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *indexClient) SetNode(ctx context.Context, in *SetNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { +func (c *indexClient) ClusterJoin(ctx context.Context, in *ClusterJoinRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/SetNode", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/ClusterJoin", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *indexClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) { +func (c *indexClient) ClusterLeave(ctx context.Context, in *ClusterLeaveRequest, opts ...grpc.CallOption) (*empty.Empty, error) { out := new(empty.Empty) - err := c.cc.Invoke(ctx, "/index.Index/DeleteNode", in, out, opts...) + err := c.cc.Invoke(ctx, "/index.Index/ClusterLeave", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *indexClient) GetCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetClusterResponse, error) { - out := new(GetClusterResponse) - err := c.cc.Invoke(ctx, "/index.Index/GetCluster", in, out, opts...) +func (c *indexClient) ClusterInfo(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ClusterInfoResponse, error) { + out := new(ClusterInfoResponse) + err := c.cc.Invoke(ctx, "/index.Index/ClusterInfo", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *indexClient) WatchCluster(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_WatchClusterClient, error) { - stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[0], "/index.Index/WatchCluster", opts...) +func (c *indexClient) ClusterWatch(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (Index_ClusterWatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Index_serviceDesc.Streams[0], "/index.Index/ClusterWatch", opts...) if err != nil { return nil, err } - x := &indexWatchClusterClient{stream} + x := &indexClusterWatchClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -1004,17 +1235,17 @@ func (c *indexClient) WatchCluster(ctx context.Context, in *empty.Empty, opts .. 
return x, nil } -type Index_WatchClusterClient interface { - Recv() (*GetClusterResponse, error) +type Index_ClusterWatchClient interface { + Recv() (*ClusterWatchResponse, error) grpc.ClientStream } -type indexWatchClusterClient struct { +type indexClusterWatchClient struct { grpc.ClientStream } -func (x *indexWatchClusterClient) Recv() (*GetClusterResponse, error) { - m := new(GetClusterResponse) +func (x *indexClusterWatchClient) Recv() (*ClusterWatchResponse, error) { + m := new(ClusterWatchResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -1136,13 +1367,12 @@ func (c *indexClient) Snapshot(ctx context.Context, in *empty.Empty, opts ...grp // IndexServer is the server API for Index service. type IndexServer interface { - LivenessProbe(context.Context, *empty.Empty) (*LivenessProbeResponse, error) - ReadinessProbe(context.Context, *empty.Empty) (*ReadinessProbeResponse, error) - GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) - SetNode(context.Context, *SetNodeRequest) (*empty.Empty, error) - DeleteNode(context.Context, *DeleteNodeRequest) (*empty.Empty, error) - GetCluster(context.Context, *empty.Empty) (*GetClusterResponse, error) - WatchCluster(*empty.Empty, Index_WatchClusterServer) error + NodeHealthCheck(context.Context, *NodeHealthCheckRequest) (*NodeHealthCheckResponse, error) + NodeInfo(context.Context, *empty.Empty) (*NodeInfoResponse, error) + ClusterJoin(context.Context, *ClusterJoinRequest) (*empty.Empty, error) + ClusterLeave(context.Context, *ClusterLeaveRequest) (*empty.Empty, error) + ClusterInfo(context.Context, *empty.Empty) (*ClusterInfoResponse, error) + ClusterWatch(*empty.Empty, Index_ClusterWatchServer) error GetDocument(context.Context, *GetDocumentRequest) (*GetDocumentResponse, error) IndexDocument(Index_IndexDocumentServer) error DeleteDocument(Index_DeleteDocumentServer) error @@ -1156,132 +1386,114 @@ func RegisterIndexServer(s *grpc.Server, srv IndexServer) { s.RegisterService(&_Index_serviceDesc, srv) } -func _Index_LivenessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(empty.Empty) +func _Index_NodeHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeHealthCheckRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(IndexServer).LivenessProbe(ctx, in) + return srv.(IndexServer).NodeHealthCheck(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Index/LivenessProbe", + FullMethod: "/index.Index/NodeHealthCheck", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).LivenessProbe(ctx, req.(*empty.Empty)) + return srv.(IndexServer).NodeHealthCheck(ctx, req.(*NodeHealthCheckRequest)) } return interceptor(ctx, in, info, handler) } -func _Index_ReadinessProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_NodeInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(IndexServer).ReadinessProbe(ctx, in) + return srv.(IndexServer).NodeInfo(ctx, in) } info := &grpc.UnaryServerInfo{ 
Server: srv, - FullMethod: "/index.Index/ReadinessProbe", + FullMethod: "/index.Index/NodeInfo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).ReadinessProbe(ctx, req.(*empty.Empty)) + return srv.(IndexServer).NodeInfo(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } -func _Index_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNodeRequest) +func _Index_ClusterJoin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClusterJoinRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(IndexServer).GetNode(ctx, in) + return srv.(IndexServer).ClusterJoin(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Index/GetNode", + FullMethod: "/index.Index/ClusterJoin", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).GetNode(ctx, req.(*GetNodeRequest)) + return srv.(IndexServer).ClusterJoin(ctx, req.(*ClusterJoinRequest)) } return interceptor(ctx, in, info, handler) } -func _Index_SetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetNodeRequest) +func _Index_ClusterLeave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ClusterLeaveRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(IndexServer).SetNode(ctx, in) + return srv.(IndexServer).ClusterLeave(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Index/SetNode", + FullMethod: "/index.Index/ClusterLeave", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).SetNode(ctx, req.(*SetNodeRequest)) + return srv.(IndexServer).ClusterLeave(ctx, req.(*ClusterLeaveRequest)) } return interceptor(ctx, in, info, handler) } -func _Index_DeleteNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteNodeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IndexServer).DeleteNode(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/index.Index/DeleteNode", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IndexServer).DeleteNode(ctx, req.(*DeleteNodeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Index_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _Index_ClusterInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(empty.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(IndexServer).GetCluster(ctx, in) + return srv.(IndexServer).ClusterInfo(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/index.Index/GetCluster", + FullMethod: "/index.Index/ClusterInfo", } handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { - return srv.(IndexServer).GetCluster(ctx, req.(*empty.Empty)) + return srv.(IndexServer).ClusterInfo(ctx, req.(*empty.Empty)) } return interceptor(ctx, in, info, handler) } -func _Index_WatchCluster_Handler(srv interface{}, stream grpc.ServerStream) error { +func _Index_ClusterWatch_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(empty.Empty) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(IndexServer).WatchCluster(m, &indexWatchClusterServer{stream}) + return srv.(IndexServer).ClusterWatch(m, &indexClusterWatchServer{stream}) } -type Index_WatchClusterServer interface { - Send(*GetClusterResponse) error +type Index_ClusterWatchServer interface { + Send(*ClusterWatchResponse) error grpc.ServerStream } -type indexWatchClusterServer struct { +type indexClusterWatchServer struct { grpc.ServerStream } -func (x *indexWatchClusterServer) Send(m *GetClusterResponse) error { +func (x *indexClusterWatchServer) Send(m *ClusterWatchResponse) error { return x.ServerStream.SendMsg(m) } @@ -1432,28 +1644,24 @@ var _Index_serviceDesc = grpc.ServiceDesc{ HandlerType: (*IndexServer)(nil), Methods: []grpc.MethodDesc{ { - MethodName: "LivenessProbe", - Handler: _Index_LivenessProbe_Handler, - }, - { - MethodName: "ReadinessProbe", - Handler: _Index_ReadinessProbe_Handler, + MethodName: "NodeHealthCheck", + Handler: _Index_NodeHealthCheck_Handler, }, { - MethodName: "GetNode", - Handler: _Index_GetNode_Handler, + MethodName: "NodeInfo", + Handler: _Index_NodeInfo_Handler, }, { - MethodName: "SetNode", - Handler: _Index_SetNode_Handler, + MethodName: "ClusterJoin", + Handler: _Index_ClusterJoin_Handler, }, { - MethodName: "DeleteNode", - Handler: _Index_DeleteNode_Handler, + MethodName: "ClusterLeave", + Handler: _Index_ClusterLeave_Handler, }, { - MethodName: "GetCluster", - Handler: _Index_GetCluster_Handler, + MethodName: "ClusterInfo", + Handler: _Index_ClusterInfo_Handler, }, { MethodName: "GetDocument", @@ -1478,8 +1686,8 @@ var _Index_serviceDesc = grpc.ServiceDesc{ }, Streams: []grpc.StreamDesc{ { - StreamName: "WatchCluster", - Handler: _Index_WatchCluster_Handler, + StreamName: "ClusterWatch", + Handler: _Index_ClusterWatch_Handler, ServerStreams: true, }, { diff --git a/protobuf/index/index.proto b/protobuf/index/index.proto index 4629c22..bf354b0 100644 --- a/protobuf/index/index.proto +++ b/protobuf/index/index.proto @@ -22,14 +22,13 @@ package index; option go_package = "github.com/mosuka/blast/protobuf/index"; service Index { - rpc LivenessProbe (google.protobuf.Empty) returns (LivenessProbeResponse) {} - rpc ReadinessProbe (google.protobuf.Empty) returns (ReadinessProbeResponse) {} + rpc NodeHealthCheck (NodeHealthCheckRequest) returns (NodeHealthCheckResponse) {} + rpc NodeInfo (google.protobuf.Empty) returns (NodeInfoResponse) {} - rpc GetNode (GetNodeRequest) returns (GetNodeResponse) {} - rpc SetNode (SetNodeRequest) returns (google.protobuf.Empty) {} - rpc DeleteNode (DeleteNodeRequest) returns (google.protobuf.Empty) {} - rpc GetCluster (google.protobuf.Empty) returns (GetClusterResponse) {} - rpc WatchCluster (google.protobuf.Empty) returns (stream GetClusterResponse) {} + rpc ClusterJoin (ClusterJoinRequest) returns (google.protobuf.Empty) {} + rpc ClusterLeave (ClusterLeaveRequest) returns (google.protobuf.Empty) {} + rpc ClusterInfo (google.protobuf.Empty) returns (ClusterInfoResponse) {} + rpc ClusterWatch (google.protobuf.Empty) returns (stream ClusterWatchResponse) {} rpc GetDocument (GetDocumentRequest) returns 
(GetDocumentResponse) {}
     rpc IndexDocument (stream IndexDocumentRequest) returns (IndexDocumentResponse) {}
@@ -40,51 +39,76 @@ service Index {
     rpc Snapshot (google.protobuf.Empty) returns (google.protobuf.Empty) {}
 }
 
-// use for health check
-message LivenessProbeResponse {
+message NodeHealthCheckRequest {
+    enum Probe {
+        HEALTHINESS = 0;
+        LIVENESS = 1;
+        READINESS = 2;
+    }
+    Probe probe = 1;
+}
+
+message NodeHealthCheckResponse {
     enum State {
-        UNKNOWN = 0;
-        ALIVE = 1;
-        DEAD = 2;
+        HEALTHY = 0;
+        UNHEALTHY = 1;
+        ALIVE = 2;
+        DEAD = 3;
+        READY = 4;
+        NOT_READY = 5;
     }
     State state = 1;
 }
 
-// use for health check
-message ReadinessProbeResponse {
+message Metadata {
+    string grpc_address = 1;
+    string http_address = 2;
+}
+
+message Node {
     enum State {
         UNKNOWN = 0;
-        READY = 1;
-        NOT_READY = 2;
+        FOLLOWER = 1;
+        CANDIDATE = 2;
+        LEADER = 3;
+        SHUTDOWN = 4;
     }
-    State state = 1;
+    string id = 1;
+    string bind_address = 2;
+    State state = 3;
+    Metadata metadata = 4;
 }
 
-// use for raft cluster status
-message GetNodeRequest {
-    string id = 1;
+message Cluster {
+    map<string, Node> nodes = 1;
 }
 
-// use for raft cluster status
-message GetNodeResponse {
-    google.protobuf.Any nodeConfig = 1;
-    string state = 2;
+message NodeInfoResponse {
+    Node node = 1;
 }
 
-// use for raft cluster status
-message SetNodeRequest {
-    string id = 1;
-    google.protobuf.Any nodeConfig = 2;
+message ClusterJoinRequest {
+    Node node = 1;
}
 
-// use for raft cluster status
-message DeleteNodeRequest {
+message ClusterLeaveRequest {
     string id = 1;
 }
 
-// use for raft cluster status
-message GetClusterResponse {
-    google.protobuf.Any cluster = 1;
+message ClusterInfoResponse {
+    Cluster cluster = 1;
+}
+
+message ClusterWatchResponse {
+    enum Event {
+        UNKNOWN = 0;
+        JOIN = 1;
+        LEAVE = 2;
+        UPDATE = 3;
+    }
+    Event event = 1;
+    Node node = 2;
+    Cluster cluster = 3;
 }
 
 message GetDocumentRequest {